patch (string, 17–31.2k chars) | y (int64, 1–1) | oldf (string, 0–2.21M chars) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---
@@ -167,6 +167,19 @@ func TestPullImageGlobalTimeout(t *testing.T) {
wait.Done()
}
+func TestPullImageInactivityTimeout(t *testing.T) {
+ mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
+ defer done()
+
+ testTime.EXPECT().After(gomock.Any()).AnyTimes()
+ mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image:latest"}, gomock.Any()).Return(
+ docker.ErrInactivityTimeout).Times(maximumPullRetries) // expected number of retries
+
+ metadata := client.PullImage("image", nil)
+ assert.Error(t, metadata.Error, "Expected error for pull inactivity timeout")
+ assert.Equal(t, "CannotPullContainerError", metadata.Error.(api.NamedError).ErrorName(), "Wrong error type")
+}
+
func TestPullImage(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
	defer done()
| y: 1 |
// +build !integration
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"encoding/base64"
"errors"
"io"
"reflect"
"strconv"
"sync"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecr/mocks"
ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/dockeriface/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks"
"context"
"github.com/aws/aws-sdk-go/aws"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// xContainerShortTimeout is a short duration intended to be used by the
// docker client APIs that test if the underlying context gets canceled
// upon the expiration of the timeout duration.
const xContainerShortTimeout = 1 * time.Millisecond
func defaultTestConfig() *config.Config {
cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient())
return cfg
}
func dockerClientSetup(t *testing.T) (
*mock_dockeriface.MockClient,
*dockerGoClient,
*mock_ttime.MockTime,
*gomock.Controller,
*mock_ecr.MockECRFactory,
func()) {
return dockerClientSetupWithConfig(t, config.DefaultConfig())
}
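// dockerClientSetupWithConfig builds a dockerGoClient wired to gomock-backed
// Docker, ECR factory, and time mocks using the given config. The returned
// func() is the gomock controller's Finish and should be deferred by callers.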
func dockerClientSetupWithConfig(t *testing.T, conf config.Config) (
*mock_dockeriface.MockClient,
*dockerGoClient,
*mock_ttime.MockTime,
*gomock.Controller,
*mock_ecr.MockECRFactory,
func()) {
ctrl := gomock.NewController(t)
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
mockTime := mock_ttime.NewMockTime(ctrl)
conf.EngineAuthData = config.NewSensitiveRawMessage([]byte{})
client, _ := NewDockerGoClient(factory, &conf)
goClient, _ := client.(*dockerGoClient)
ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
goClient.ecrClientFactory = ecrClientFactory
goClient._time = mockTime
return mockDocker, goClient, mockTime, ctrl, ecrClientFactory, ctrl.Finish
}
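// pullImageOptsMatcher is a gomock Matcher that matches a PullImage call by
// comparing only the Repository field of its docker.PullImageOptions argument.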
type pullImageOptsMatcher struct {
image string
}
func (matcher *pullImageOptsMatcher) String() string {
return "matches " + matcher.image
}
func (matcher *pullImageOptsMatcher) Matches(x interface{}) bool {
return matcher.image == x.(docker.PullImageOptions).Repository
}
func TestPullImageOutputTimeout(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
pullBeginTimeout := make(chan time.Time)
testTime.EXPECT().After(dockerPullBeginTimeout).Return(pullBeginTimeout).MinTimes(1)
testTime.EXPECT().After(pullImageTimeout).MinTimes(1)
wait := sync.WaitGroup{}
wait.Add(1)
// multiple invocations will happen due to retries, but all should time out
mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image:latest"}, gomock.Any()).Do(
func(x, y interface{}) {
pullBeginTimeout <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
}).Times(maximumPullRetries) // expected number of retries
metadata := client.PullImage("image", nil)
if metadata.Error == nil {
t.Error("Expected error for pull timeout")
}
if metadata.Error.(api.NamedError).ErrorName() != "DockerTimeoutError" {
t.Error("Wrong error type")
}
// cleanup
wait.Done()
}
func TestPullImageGlobalTimeout(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
pullBeginTimeout := make(chan time.Time, 1)
testTime.EXPECT().After(dockerPullBeginTimeout).Return(pullBeginTimeout)
pullTimeout := make(chan time.Time, 1)
testTime.EXPECT().After(pullImageTimeout).Return(pullTimeout)
wait := sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image:latest"}, gomock.Any()).Do(func(x, y interface{}) {
opts, ok := x.(docker.PullImageOptions)
if !ok {
t.Error("Cannot cast argument to PullImageOptions")
}
io.WriteString(opts.OutputStream, "string\n")
pullBeginTimeout <- time.Now()
pullTimeout <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
})
metadata := client.PullImage("image", nil)
if metadata.Error == nil {
t.Error("Expected error for pull timeout")
}
if metadata.Error.(api.NamedError).ErrorName() != "DockerTimeoutError" {
t.Error("Wrong error type")
}
testTime.EXPECT().After(dockerPullBeginTimeout)
testTime.EXPECT().After(pullImageTimeout)
mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image2:latest"}, gomock.Any())
_ = client.PullImage("image2", nil)
// cleanup
wait.Done()
}
func TestPullImage(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
testTime.EXPECT().After(gomock.Any()).AnyTimes()
mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image:latest"}, gomock.Any()).Return(nil)
metadata := client.PullImage("image", nil)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
func TestPullImageTag(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
testTime.EXPECT().After(gomock.Any()).AnyTimes()
mockDocker.EXPECT().PullImage(&pullImageOptsMatcher{"image:mytag"}, gomock.Any()).Return(nil)
metadata := client.PullImage("image:mytag", nil)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
func TestPullImageDigest(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
testTime.EXPECT().After(gomock.Any()).AnyTimes()
mockDocker.EXPECT().PullImage(
&pullImageOptsMatcher{"image@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb"},
gomock.Any(),
).Return(nil)
metadata := client.PullImage("image@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb", nil)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
func TestPullImageECRSuccess(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
client, _ := NewDockerGoClient(factory, defaultTestConfig())
goClient, _ := client.(*dockerGoClient)
ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
ecrClient := mock_ecr.NewMockECRClient(ctrl)
mockTime := mock_ttime.NewMockTime(ctrl)
goClient.ecrClientFactory = ecrClientFactory
goClient._time = mockTime
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
registryID := "123456789012"
region := "eu-west-1"
endpointOverride := "my.endpoint"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "/myimage:tag"
username := "username"
password := "password"
dockerAuthConfiguration := docker.AuthConfiguration{
Username: username,
Password: password,
ServerAddress: "https://" + imageEndpoint,
}
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
}, nil)
mockDocker.EXPECT().PullImage(
&pullImageOptsMatcher{image},
dockerAuthConfiguration,
).Return(nil)
metadata := client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
func TestPullImageECRAuthFail(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
client, _ := NewDockerGoClient(factory, defaultTestConfig())
goClient, _ := client.(*dockerGoClient)
ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
ecrClient := mock_ecr.NewMockECRClient(ctrl)
mockTime := mock_ttime.NewMockTime(ctrl)
goClient.ecrClientFactory = ecrClientFactory
goClient._time = mockTime
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
registryID := "123456789012"
region := "eu-west-1"
endpointOverride := "my.endpoint"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "/myimage:tag"
// no retries for this error
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil)
ecrClient.EXPECT().GetAuthorizationToken(gomock.Any()).Return(nil, errors.New("test error"))
metadata := client.PullImage(image, authData)
assert.Error(t, metadata.Error, "expected pull to fail")
}
func TestGetRepositoryWithTaggedImage(t *testing.T) {
image := "registry.endpoint/myimage:tag"
repository := getRepository(image)
assert.Equal(t, image, repository)
}
func TestGetRepositoryWithUntaggedImage(t *testing.T) {
image := "registry.endpoint/myimage"
repository := getRepository(image)
assert.Equal(t, image+":"+dockerDefaultTag, repository)
}
func TestImportLocalEmptyVolumeImage(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
// The special emptyvolume image leads to a create, not pull
testTime.EXPECT().After(gomock.Any()).AnyTimes()
gomock.InOrder(
mockDocker.EXPECT().InspectImage(emptyvolume.Image+":"+emptyvolume.Tag).Return(nil, errors.New("Does not exist")),
mockDocker.EXPECT().ImportImage(gomock.Any()).Do(func(x interface{}) {
req := x.(docker.ImportImageOptions)
require.Equal(t, emptyvolume.Image, req.Repository, "expected empty volume repository")
require.Equal(t, emptyvolume.Tag, req.Tag, "expected empty volume tag")
}),
)
metadata := client.ImportLocalEmptyVolumeImage()
assert.NoError(t, metadata.Error, "Expected import to succeed")
}
func TestImportLocalEmptyVolumeImageExisting(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
// The special emptyvolume image leads to a create only if it doesn't exist
testTime.EXPECT().After(gomock.Any()).AnyTimes()
gomock.InOrder(
mockDocker.EXPECT().InspectImage(emptyvolume.Image+":"+emptyvolume.Tag).Return(&docker.Image{}, nil),
)
metadata := client.ImportLocalEmptyVolumeImage()
assert.NoError(t, metadata.Error, "Expected import to succeed")
}
func TestCreateContainerTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
warp := make(chan time.Time)
wait := &sync.WaitGroup{}
wait.Add(1)
config := docker.CreateContainerOptions{Config: &docker.Config{Memory: 100}, Name: "containerName"}
mockDocker.EXPECT().CreateContainer(gomock.Any()).Do(func(x interface{}) {
warp <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
// TODO: remove the MaxTimes by canceling the context passed to CreateContainer
// when issue #1212 is resolved
}).MaxTimes(1)
metadata := client.CreateContainer(config.Config, nil, config.Name, xContainerShortTimeout)
assert.Error(t, metadata.Error, "expected error for pull timeout")
assert.Equal(t, "DockerTimeoutError", metadata.Error.(api.NamedError).ErrorName())
wait.Done()
}
func TestCreateContainerInspectTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
wait := &sync.WaitGroup{}
wait.Add(1)
config := docker.CreateContainerOptions{Config: &docker.Config{Memory: 100}, Name: "containerName"}
gomock.InOrder(
mockDocker.EXPECT().CreateContainer(gomock.Any()).Do(func(opts docker.CreateContainerOptions) {
if !reflect.DeepEqual(opts.Config, config.Config) {
t.Errorf("Mismatch in create container config, %v != %v", opts.Config, config.Config)
}
if opts.Name != config.Name {
t.Errorf("Mismatch in create container options, %s != %s", opts.Name, config.Name)
}
}).Return(&docker.Container{ID: "id"}, nil),
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(nil, &DockerTimeoutError{}),
)
metadata := client.CreateContainer(config.Config, nil, config.Name, 1*time.Second)
if metadata.DockerID != "id" {
t.Error("Expected ID to be set even if inspect failed; was " + metadata.DockerID)
}
if metadata.Error == nil {
t.Error("Expected error for inspect timeout")
}
wait.Done()
}
func TestCreateContainer(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
config := docker.CreateContainerOptions{Config: &docker.Config{Memory: 100}, Name: "containerName"}
gomock.InOrder(
mockDocker.EXPECT().CreateContainer(gomock.Any()).Do(func(opts docker.CreateContainerOptions) {
if !reflect.DeepEqual(opts.Config, config.Config) {
t.Errorf("Mismatch in create container config, %v != %v", opts.Config, config.Config)
}
if opts.Name != config.Name {
t.Errorf("Mismatch in create container options, %s != %s", opts.Name, config.Name)
}
}).Return(&docker.Container{ID: "id"}, nil),
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(&docker.Container{ID: "id"}, nil),
)
metadata := client.CreateContainer(config.Config, nil, config.Name, 1*time.Second)
if metadata.Error != nil {
t.Error("Did not expect error")
}
if metadata.DockerID != "id" {
t.Error("Wrong id")
}
if metadata.ExitCode != nil {
t.Error("Expected a created container to not have an exit code")
}
}
func TestStartContainerTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
testDone := make(chan struct{})
wait := &sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().StartContainerWithContext("id", nil, gomock.Any()).Do(func(x, y, z interface{}) {
wait.Wait() // wait until timeout happens
close(testDone)
})
// TODO This should be MaxTimes(1) after we update gomock
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(nil, errors.New("test error")).AnyTimes()
metadata := client.StartContainer("id", xContainerShortTimeout)
assert.NotNil(t, metadata.Error, "Expected error for pull timeout")
assert.Equal(t, "DockerTimeoutError", metadata.Error.(api.NamedError).ErrorName(), "Wrong error type")
wait.Done()
<-testDone
}
func TestStartContainer(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
gomock.InOrder(
mockDocker.EXPECT().StartContainerWithContext("id", nil, gomock.Any()).Return(nil),
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(&docker.Container{ID: "id"}, nil),
)
metadata := client.StartContainer("id", startContainerTimeout)
if metadata.Error != nil {
t.Error("Did not expect error")
}
if metadata.DockerID != "id" {
t.Error("Wrong id")
}
}
func TestStopContainerTimeout(t *testing.T) {
cfg := config.DefaultConfig()
cfg.DockerStopTimeout = xContainerShortTimeout
mockDocker, client, _, _, _, done := dockerClientSetupWithConfig(t, cfg)
defer done()
warp := make(chan time.Time)
wait := &sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().StopContainerWithContext("id", uint(client.config.DockerStopTimeout/time.Second), gomock.Any()).Do(func(x, y, z interface{}) {
warp <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
})
metadata := client.StopContainer("id", xContainerShortTimeout)
if metadata.Error == nil {
t.Error("Expected error for pull timeout")
}
if metadata.Error.(api.NamedError).ErrorName() != "DockerTimeoutError" {
t.Error("Wrong error type")
}
wait.Done()
}
func TestStopContainer(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
gomock.InOrder(
mockDocker.EXPECT().StopContainerWithContext("id", uint(client.config.DockerStopTimeout/time.Second), gomock.Any()).Return(nil),
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(&docker.Container{ID: "id", State: docker.State{ExitCode: 10}}, nil),
)
metadata := client.StopContainer("id", stopContainerTimeout)
if metadata.Error != nil {
t.Error("Did not expect error")
}
if metadata.DockerID != "id" {
t.Error("Wrong id")
}
}
func TestInspectContainerTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
warp := make(chan time.Time)
wait := &sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Do(func(x, ctx interface{}) {
warp <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
})
_, err := client.InspectContainer("id", xContainerShortTimeout)
if err == nil {
t.Error("Expected error for inspect timeout")
}
if err.(api.NamedError).ErrorName() != "DockerTimeoutError" {
t.Error("Wrong error type")
}
wait.Done()
}
func TestInspectContainer(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
containerOutput := docker.Container{ID: "id",
State: docker.State{
ExitCode: 10,
Health: docker.Health{
Status: "healthy",
Log: []docker.HealthCheck{
{
ExitCode: 1,
Output: "health output",
},
},
}}}
gomock.InOrder(
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(&containerOutput, nil),
)
container, err := client.InspectContainer("id", inspectContainerTimeout)
if err != nil {
t.Error("Did not expect error")
}
if !reflect.DeepEqual(&containerOutput, container) {
t.Fatal("Did not match expected output")
}
}
func TestContainerEvents(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
var events chan<- *docker.APIEvents
mockDocker.EXPECT().AddEventListener(gomock.Any()).Do(func(x interface{}) {
events = x.(chan<- *docker.APIEvents)
})
dockerEvents, err := client.ContainerEvents(context.TODO())
require.NoError(t, err, "Could not get container events")
mockDocker.EXPECT().InspectContainerWithContext("containerId", gomock.Any()).Return(
&docker.Container{
ID: "containerId",
},
nil)
go func() {
events <- &docker.APIEvents{Type: "container", ID: "containerId", Status: "create"}
}()
event := <-dockerEvents
assert.Equal(t, event.DockerID, "containerId", "Wrong docker id")
assert.Equal(t, event.Status, api.ContainerCreated, "Wrong status")
container := &docker.Container{
ID: "cid2",
NetworkSettings: &docker.NetworkSettings{
Ports: map[docker.Port][]docker.PortBinding{
"80/tcp": {{HostPort: "9001"}},
},
},
Volumes: map[string]string{"/host/path": "/container/path"},
}
mockDocker.EXPECT().InspectContainerWithContext("cid2", gomock.Any()).Return(container, nil)
go func() {
events <- &docker.APIEvents{Type: "container", ID: "cid2", Status: "start"}
}()
event = <-dockerEvents
assert.Equal(t, event.DockerID, "cid2", "Wrong docker id")
assert.Equal(t, event.Status, api.ContainerRunning, "Wrong status")
assert.Equal(t, event.PortBindings[0].ContainerPort, uint16(80), "Incorrect port bindings")
assert.Equal(t, event.PortBindings[0].HostPort, uint16(9001), "Incorrect port bindings")
assert.Equal(t, event.Volumes["/host/path"], "/container/path", "Incorrect volume mapping")
for i := 0; i < 2; i++ {
stoppedContainer := &docker.Container{
ID: "cid3" + strconv.Itoa(i),
State: docker.State{
FinishedAt: time.Now(),
ExitCode: 20,
},
}
mockDocker.EXPECT().InspectContainerWithContext("cid3"+strconv.Itoa(i), gomock.Any()).Return(stoppedContainer, nil)
}
go func() {
events <- &docker.APIEvents{Type: "container", ID: "cid30", Status: "stop"}
events <- &docker.APIEvents{Type: "container", ID: "cid31", Status: "die"}
}()
for i := 0; i < 2; i++ {
anEvent := <-dockerEvents
assert.True(t, anEvent.DockerID == "cid30" || anEvent.DockerID == "cid31", "Wrong container id: "+anEvent.DockerID)
assert.Equal(t, anEvent.Status, api.ContainerStopped, "Should be stopped")
assert.Equal(t, aws.IntValue(anEvent.ExitCode), 20, "Incorrect exit code")
}
containerWithHealthInfo := &docker.Container{
ID: "container_health",
State: docker.State{
Health: docker.Health{
Status: "healthy",
Log: []docker.HealthCheck{
{
ExitCode: 1,
Output: "health output",
},
},
},
},
}
mockDocker.EXPECT().InspectContainerWithContext("container_health", gomock.Any()).Return(containerWithHealthInfo, nil)
go func() {
events <- &docker.APIEvents{
Type: "container",
ID: "container_health",
Action: "health_status: unhealthy",
Status: "health_status: unhealthy",
Actor: docker.APIActor{
ID: "container_health",
},
}
}()
anEvent := <-dockerEvents
assert.Equal(t, anEvent.Type, api.ContainerHealthEvent, "unexpected docker events type received")
assert.Equal(t, anEvent.Health.Status, api.ContainerHealthy)
assert.Equal(t, anEvent.Health.Output, "health output")
// Verify the following events do not translate into our event stream
//
// Docker 1.8.3 sends the full command appended to exec_create and exec_start
// events. Test that we ignore these as well.
//
ignore := []string{
"pause",
"exec_create",
"exec_create: /bin/bash",
"exec_start",
"exec_start: /bin/bash",
"top",
"attach",
"export",
"pull",
"push",
"tag",
"untag",
"import",
"delete",
"oom",
"kill",
}
for _, eventStatus := range ignore {
events <- &docker.APIEvents{Type: "container", ID: "123", Status: eventStatus}
select {
case <-dockerEvents:
t.Error("No event should be available for " + eventStatus)
default:
}
}
// Verify that only container-type events translate into our event stream;
// network, image, volume, plugin, and daemon events are not handled
ignoreEventType := map[string]string{
"network": "connect",
"image": "pull",
"volume": "create",
"plugin": "install",
"daemon": "reload",
}
for eventType, eventStatus := range ignoreEventType {
events <- &docker.APIEvents{Type: eventType, ID: "123", Status: eventStatus}
select {
case <-dockerEvents:
t.Errorf("No event should be available for %v", eventType)
default:
}
}
}
func TestDockerVersion(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
mockDocker.EXPECT().Version().Return(&docker.Env{"Version=1.6.0"}, nil)
str, err := client.Version()
if err != nil {
t.Error(err)
}
if str != "1.6.0" {
t.Error("Got unexpected version string: " + str)
}
}
func TestListContainers(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
containers := []docker.APIContainers{{ID: "id"}}
mockDocker.EXPECT().ListContainers(gomock.Any()).Return(containers, nil)
response := client.ListContainers(true, ListContainersTimeout)
if response.Error != nil {
t.Error("Did not expect error")
}
containerIds := response.DockerIDs
if len(containerIds) != 1 {
t.Error("Unexpected number of containers in list: ", len(containerIds))
}
if containerIds[0] != "id" {
t.Error("Unexpected container id in the list: ", containerIds[0])
}
}
func TestListContainersTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
warp := make(chan time.Time)
wait := &sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().ListContainers(gomock.Any()).Do(func(x interface{}) {
warp <- time.Now()
wait.Wait()
// Don't return, verify timeout happens
})
response := client.ListContainers(true, xContainerShortTimeout)
if response.Error == nil {
t.Error("Expected error for pull timeout")
}
if response.Error.(api.NamedError).ErrorName() != "DockerTimeoutError" {
t.Error("Wrong error type")
}
<-warp
wait.Done()
}
func TestPingFailError(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().Return(errors.New("err"))
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().Return(mockDocker, nil)
_, err := NewDockerGoClient(factory, defaultTestConfig())
if err == nil {
t.Fatal("Expected ping error to result in constructor fail")
}
}
func TestUsesVersionedClient(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().Return(nil)
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().Return(mockDocker, nil)
client, err := NewDockerGoClient(factory, defaultTestConfig())
if err != nil {
t.Fatal(err)
}
vclient := client.WithVersion(dockerclient.DockerVersion("1.20"))
factory.EXPECT().GetClient(dockerclient.DockerVersion("1.20")).Times(2).Return(mockDocker, nil)
mockDocker.EXPECT().StartContainerWithContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
mockDocker.EXPECT().InspectContainerWithContext(gomock.Any(), gomock.Any()).Return(nil, errors.New("err"))
vclient.StartContainer("foo", startContainerTimeout)
}
func TestUnavailableVersionError(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDocker := mock_dockeriface.NewMockClient(ctrl)
mockDocker.EXPECT().Ping().Return(nil)
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().Return(mockDocker, nil)
client, err := NewDockerGoClient(factory, defaultTestConfig())
if err != nil {
t.Fatal(err)
}
vclient := client.WithVersion(dockerclient.DockerVersion("1.21"))
factory.EXPECT().GetClient(dockerclient.DockerVersion("1.21")).Times(1).Return(nil, errors.New("Cannot get client"))
metadata := vclient.StartContainer("foo", startContainerTimeout)
if metadata.Error == nil {
t.Fatal("Expected error, didn't get one")
}
if namederr, ok := metadata.Error.(api.NamedError); ok {
if namederr.ErrorName() != "CannotGetDockerclientError" {
t.Fatal("Wrong error name, expected CannotGetDockerclientError but got " + namederr.ErrorName())
}
} else {
t.Fatal("Error was not a named error")
}
}
func TestStatsNormalExit(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
time1 := time.Now()
time2 := time1.Add(1 * time.Second)
mockDocker.EXPECT().Stats(gomock.Any()).Do(func(x interface{}) {
opts := x.(docker.StatsOptions)
defer close(opts.Stats)
if opts.ID != "foo" {
t.Fatalf("Expected ID foo, got %s", opts.ID)
}
if opts.Stream != true {
t.Fatal("Expected stream to be true")
}
opts.Stats <- &docker.Stats{
Read: time1,
}
opts.Stats <- &docker.Stats{
Read: time2,
}
})
ctx := context.TODO()
stats, err := client.Stats("foo", ctx)
if err != nil {
t.Fatal(err)
}
stat := <-stats
checkStatRead(t, stat, time1)
stat = <-stats
checkStatRead(t, stat, time2)
stat = <-stats
if stat != nil {
t.Fatal("Expected stat to be nil")
}
}
func checkStatRead(t *testing.T, stat *docker.Stats, read time.Time) {
if stat.Read != read {
t.Fatalf("Expected %v, but was %v", read, stat.Read)
}
}
func TestStatsClosed(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
time1 := time.Now()
mockDocker.EXPECT().Stats(gomock.Any()).Do(func(x interface{}) {
opts := x.(docker.StatsOptions)
defer close(opts.Stats)
if opts.ID != "foo" {
t.Fatalf("Expected ID foo, got %s", opts.ID)
}
if opts.Stream != true {
t.Fatal("Expected stream to be true")
}
for i := 0; true; i++ {
select {
case <-opts.Context.Done():
t.Logf("Received cancel after %d iterations", i)
return
default:
opts.Stats <- &docker.Stats{
Read: time1.Add(time.Duration(i) * time.Second),
}
}
}
})
ctx, cancel := context.WithCancel(context.TODO())
stats, err := client.Stats("foo", ctx)
if err != nil {
t.Fatal(err)
}
stat := <-stats
checkStatRead(t, stat, time1)
stat = <-stats
checkStatRead(t, stat, time1.Add(time.Second))
cancel()
// drain
for {
stat = <-stats
if stat == nil {
break
}
}
}
func TestStatsErrorReading(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
mockDocker.EXPECT().Stats(gomock.Any()).Do(func(x interface{}) error {
opts := x.(docker.StatsOptions)
close(opts.Stats)
return errors.New("test error")
})
ctx := context.TODO()
stats, err := client.Stats("foo", ctx)
if err != nil {
t.Fatal(err)
}
stat := <-stats
if stat != nil {
t.Fatal("Expected stat to be nil")
}
}
func TestStatsClientError(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
factory := mock_dockerclient.NewMockFactory(ctrl)
factory.EXPECT().GetDefaultClient().Return(nil, errors.New("No client"))
client := &dockerGoClient{
clientFactory: factory,
}
ctx := context.TODO()
_, err := client.Stats("foo", ctx)
if err == nil {
t.Fatal("Expected error with nil docker client")
}
}
func TestRemoveImageTimeout(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
wait := sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().RemoveImage("image").Do(func(x interface{}) {
wait.Wait()
})
err := client.RemoveImage("image", 2*time.Millisecond)
if err == nil {
t.Errorf("Expected error for remove image timeout")
}
wait.Done()
}
func TestRemoveImage(t *testing.T) {
mockDocker, client, testTime, _, _, done := dockerClientSetup(t)
defer done()
testTime.EXPECT().After(gomock.Any()).AnyTimes()
mockDocker.EXPECT().RemoveImage("image").Return(nil)
err := client.RemoveImage("image", 2*time.Millisecond)
if err != nil {
t.Errorf("Did not expect error, err: %v", err)
}
}
// TestContainerMetadataWorkaroundIssue27601 tests the workaround for
// issue https://github.com/moby/moby/issues/27601
func TestContainerMetadataWorkaroundIssue27601(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
mockDocker.EXPECT().InspectContainerWithContext("id", gomock.Any()).Return(&docker.Container{
Mounts: []docker.Mount{{
Destination: "destination1",
Source: "source1",
}, {
Destination: "destination2",
Source: "source2",
}},
}, nil)
metadata := client.containerMetadata("id")
assert.Equal(t, map[string]string{"destination1": "source1", "destination2": "source2"}, metadata.Volumes)
}
func TestLoadImageHappyPath(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
mockDocker.EXPECT().LoadImage(gomock.Any()).Return(nil)
err := client.LoadImage(nil, time.Second)
assert.NoError(t, err)
}
func TestLoadImageTimeoutError(t *testing.T) {
mockDocker, client, _, _, _, done := dockerClientSetup(t)
defer done()
wait := sync.WaitGroup{}
wait.Add(1)
mockDocker.EXPECT().LoadImage(gomock.Any()).Do(func(x interface{}) {
wait.Wait()
})
err := client.LoadImage(nil, time.Millisecond)
assert.Error(t, err)
_, ok := err.(*DockerTimeoutError)
assert.True(t, ok)
wait.Done()
}
// TestECRAuthCacheWithoutExecutionRole tests that the client reuses cached
// docker auth when pulling from the same ECR registry with the default
// instance profile
func TestECRAuthCacheWithoutExecutionRole(t *testing.T) {
mockDocker, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t)
defer done()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
ecrClient := mock_ecr.NewMockECRClient(ctrl)
region := "eu-west-1"
registryID := "1234567890"
endpointOverride := "my.endpoint"
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "myimage:tag"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
username := "username"
password := "password"
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
mockDocker.EXPECT().PullImage(gomock.Any(), gomock.Any()).Return(nil).Times(4)
metadata := client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry shouldn't trigger another ECR client call
metadata = client.PullImage(image+"2", authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry shouldn't trigger another ECR client call
metadata = client.PullImage(image+"3", authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry shouldn't trigger another ECR client call
metadata = client.PullImage(image+"4", authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
// TestECRAuthCacheForDifferentRegistry tests that the client calls the ECR
// client to get docker auth for a different registry
func TestECRAuthCacheForDifferentRegistry(t *testing.T) {
mockDocker, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t)
defer done()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
ecrClient := mock_ecr.NewMockECRClient(ctrl)
region := "eu-west-1"
registryID := "1234567890"
endpointOverride := "my.endpoint"
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "/myimage:tag"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
username := "username"
password := "password"
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
mockDocker.EXPECT().PullImage(gomock.Any(), gomock.Any()).Return(nil).Times(2)
metadata := client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from a different registry should trigger a new ECR client call
authData.ECRAuthData.RegistryID = "another"
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken("another").Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
metadata = client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
// TestECRAuthCacheWithSameExecutionRole tests that the client reuses the
// cached docker auth for ECR when pulling from the same registry with the
// same execution role
func TestECRAuthCacheWithSameExecutionRole(t *testing.T) {
mockDocker, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t)
defer done()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
ecrClient := mock_ecr.NewMockECRClient(ctrl)
region := "eu-west-1"
registryID := "1234567890"
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "/myimage:tag"
endpointOverride := "my.endpoint"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{
RoleArn: "executionRole",
})
username := "username"
password := "password"
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
mockDocker.EXPECT().PullImage(gomock.Any(), gomock.Any()).Return(nil).Times(3)
metadata := client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry shouldn't trigger another ECR client call
metadata = client.PullImage(image+"2", authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry shouldn't trigger another ECR client call
metadata = client.PullImage(image+"3", authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
// TestECRAuthCacheWithDifferentExecutionRole tests that the client calls the
// ECR client to get docker auth credentials for a different execution role
func TestECRAuthCacheWithDifferentExecutionRole(t *testing.T) {
mockDocker, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t)
defer done()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
ecrClient := mock_ecr.NewMockECRClient(ctrl)
region := "eu-west-1"
registryID := "1234567890"
imageEndpoint := "registry.endpoint"
image := imageEndpoint + "/myimage:tag"
endpointOverride := "my.endpoint"
authData := &api.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &api.ECRAuthData{
RegistryID: registryID,
Region: region,
EndpointOverride: endpointOverride,
},
}
authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{
RoleArn: "executionRole",
})
username := "username"
password := "password"
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
mockDocker.EXPECT().PullImage(gomock.Any(), gomock.Any()).Return(nil).Times(2)
metadata := client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
// Pulling from the same registry but with a different role should trigger a new ECR client call
authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{
RoleArn: "executionRole2",
})
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1)
ecrClient.EXPECT().GetAuthorizationToken(registryID).Return(
&ecrapi.AuthorizationData{
ProxyEndpoint: aws.String("https://" + imageEndpoint),
AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)),
}, nil).Times(1)
metadata = client.PullImage(image, authData)
assert.NoError(t, metadata.Error, "Expected pull to succeed")
}
func TestMetadataFromContainer(t *testing.T) {
ports := map[docker.Port][]docker.PortBinding{
docker.Port("80/tcp"): []docker.PortBinding{
{
HostIP: "0.0.0.0",
HostPort: "80",
},
},
}
volumes := map[string]string{
"/foo": "/bar",
}
labels := map[string]string{
"name": "metadata",
}
created := time.Now()
started := time.Now()
finished := time.Now()
dockerContainer := &docker.Container{
NetworkSettings: &docker.NetworkSettings{
Ports: ports,
},
ID: "1234",
Volumes: volumes,
Config: &docker.Config{
Labels: labels,
},
Created: created,
State: docker.State{
Running: true,
StartedAt: started,
FinishedAt: finished,
},
}
metadata := metadataFromContainer(dockerContainer)
assert.Equal(t, "1234", metadata.DockerID)
assert.Equal(t, volumes, metadata.Volumes)
assert.Equal(t, labels, metadata.Labels)
assert.Len(t, metadata.PortBindings, 1)
assert.Equal(t, created, metadata.CreatedAt)
assert.Equal(t, started, metadata.StartedAt)
assert.Equal(t, finished, metadata.FinishedAt)
}
func TestMetadataFromContainerHealthCheckWithNoLogs(t *testing.T) {
dockerContainer := &docker.Container{
State: docker.State{
Health: docker.Health{Status: "unhealthy"},
}}
metadata := metadataFromContainer(dockerContainer)
assert.Equal(t, api.ContainerUnhealthy, metadata.Health.Status)
}
| idx: 1 | id: 19185 | msg: Please use the `assert` instead. Same below. | proj: aws-amazon-ecs-agent | lang: go |
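For context on this review comment: the test added in the patch uses testify's `assert` helpers, while several older tests in the same file (`TestPullImageOutputTimeout`, `TestDockerVersion`, and others) still check results with manual `if` blocks and `t.Error`. A minimal sketch of the conversion the reviewer is asking for, using `TestDockerVersion` above as the example and assuming nothing beyond the `assert` package already imported in this file:

func TestDockerVersion(t *testing.T) {
	mockDocker, client, _, _, _, done := dockerClientSetup(t)
	defer done()

	mockDocker.EXPECT().Version().Return(&docker.Env{"Version=1.6.0"}, nil)

	// assert.NoError and assert.Equal replace the hand-rolled if/t.Error
	// checks and report the expected and actual values on failure.
	str, err := client.Version()
	assert.NoError(t, err, "did not expect error from Version")
	assert.Equal(t, "1.6.0", str, "got unexpected version string")
}

Besides being shorter, the assert form prints both values automatically when a check fails, which is the usual motivation for preferring it over `if`/`t.Error` pairs.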
@@ -18,6 +18,7 @@ return array(
'A currency with code "%name" already exists.' => 'A currency with code "%name" already exists.',
'A descriptive title' => 'A descriptive title',
'A message with name "%name" already exists.' => 'A message with name "%name" already exists.',
+ 'A name suche as Europe or Overseas' => 'A name suche as Europe or Overseas',
'A product with reference %ref already exists. Please choose another reference.' => 'A product with reference %ref already exists. Please choose another reference.',
'A short description, used when a summary or an introduction is required' => 'A short description, used when a summary or an introduction is required',
'A short text, used when an additional or supplemental information is required.' => 'A short text, used when an additional or supplemental information is required.',
| y: 1 |
<?php
return array(
' content create form' => ' content create form',
' note: only non-visible documents can be associated.' => ' note: only non-visible documents can be associated.',
'"%param" parameter cannot be empty in loop type: %type, name: %name' => '"%param" parameter cannot be empty in loop type: %type, name: %name',
'"%param" parameter is missing in loop type: %type, name: %name' => '"%param" parameter is missing in loop type: %type, name: %name',
'%module (version: %version)' => '%module (version: %version)',
'%obj SEO modification' => '%obj SEO modification',
'%obj creation' => '%obj creation',
'%obj modification' => '%obj modification',
'%obj%s deleted successfully' => '%obj%s deleted successfully',
'%s has dependency to module %s. You have to deactivate this module before.' => '%s has dependency to module %s. You have to deactivate this module before.',
'%s have dependencies to module %s. You have to deactivate these modules before.' => '%s have dependencies to module %s. You have to deactivate these modules before.',
'%type% position updated' => '%type% position updated',
'%type% visibility updated' => '%type% visibility updated',
'A comma separated list of email addresses' => 'A comma separated list of email addresses',
'A currency with code "%name" already exists.' => 'A currency with code "%name" already exists.',
'A descriptive title' => 'A descriptive title',
'A message with name "%name" already exists.' => 'A message with name "%name" already exists.',
'A product with reference %ref already exists. Please choose another reference.' => 'A product with reference %ref already exists. Please choose another reference.',
'A short description, used when a summary or an introduction is required' => 'A short description, used when a summary or an introduction is required',
'A short text, used when an additional or supplemental information is required.' => 'A short text, used when an additional or supplemental information is required.',
'A user already exists with this email address. Please login or if you\'ve forgotten your password, go to Reset Your Password.' => 'A user already exists with this email address. Please login or if you\'ve forgotten your password, go to Reset Your Password.',
'A value for attribute "%name" is already present in the combination' => 'A value for attribute "%name" is already present in the combination',
'A variable with name "%name" already exists.' => 'A variable with name "%name" already exists.',
'Activate logs only for these IP Addresses' => 'Activate logs only for these IP Addresses',
'Activate logs only for these files' => 'Activate logs only for these files',
'Activate this sale' => 'Activate this sale',
'Active' => 'Active',
'Add to all product templates' => 'Add to all product templates',
'Additional address' => 'Additional address',
'Additional address information' => 'Additional address information',
'Address ID not found' => 'Address ID not found',
'Address Line 2' => 'Address Line 2',
'Address Line 3' => 'Address Line 3',
'Address creation' => 'Address creation',
'Address label' => 'Address label',
'Address update' => 'Address update',
'Address.' => 'Address.',
'Administrator' => 'Administrator',
'Administrator ID not found' => 'Administrator ID not found',
'Administrators' => 'Administrators',
'Advertise this product as new' => 'Advertise this product as new',
'Alerts' => 'Alerts',
'All Products' => 'All Products',
'All countries' => 'All countries',
'All shipping methods' => 'All shipping methods',
'Alpha code 2 *' => 'Alpha code 2 *',
'Alpha code 3 *' => 'Alpha code 3 *',
'An error happend while copying %prev to %dest' => 'An error happend while copying %prev to %dest',
'An error occurred while adding this file to the archive: %file' => 'An error occurred while adding this file to the archive: %file',
'An unknown error append' => 'An unknown error append',
'Apply exchange rates on price in %sym' => 'Apply exchange rates on price in %sym',
'Archive Format' => 'Archive Format',
'Associate documents' => 'Associate documents',
'Associate images' => 'Associate images',
'At least one of cart products categories is %op% <strong>%categories_list%</strong>' => 'At least one of cart products categories is %op% <strong>%categories_list%</strong>',
'Attribut' => 'Attribut',
'Attribute ID:Attribute AV ID' => 'Attribute ID:Attribute AV ID',
'Attribute value' => 'Attribute value',
'Attributes' => 'Attributes',
'Attributes value' => 'Attributes value',
'Auth mode' => 'Auth mode',
'Available quantity' => 'Available quantity',
'Available quantity *' => 'Available quantity *',
'Available shipping zones' => 'Available shipping zones',
'Back Office' => 'Back Office',
'Bad index value %idx' => 'Bad index value %idx',
'Bad tax list JSON' => 'Bad tax list JSON',
'Billing country' => 'Billing country',
'Billing country is' => 'Billing country is',
'Brand' => 'Brand',
'Brand / Supplier' => 'Brand / Supplier',
'Brand name' => 'Brand name',
'Brands' => 'Brands',
'Business ID' => 'Business ID',
'By Module' => 'By Module',
'CSS' => 'CSS',
'CSS stylesheet' => 'CSS stylesheet',
'Cannot find a default country. Please define one.' => 'Cannot find a default country. Please define one.',
'Cannot find the shop country. Please select a shop country.' => 'Cannot find the shop country. Please select a shop country.',
'Cannot instantiate module "%name%": the namespace is null. Maybe the model is not loaded ?' => 'Cannot instantiate module "%name%": the namespace is null. Maybe the model is not loaded ?',
'Cart' => 'Cart',
'Cart contains at least a product %op% <strong>%products_list%</strong>' => 'Cart contains at least a product %op% <strong>%products_list%</strong>',
'Cart contains categories condition' => 'Cart contains categories condition',
'Cart contains specific products' => 'Cart contains specific products',
'Cart item count' => 'Cart item count',
'Cart item count is' => 'Cart item count is',
'Cart total amount' => 'Cart total amount',
'Cart total amount is' => 'Cart total amount is',
'Catalog' => 'Catalog',
'Catalog configuration' => 'Catalog configuration',
'Categories' => 'Categories',
'Category' => 'Category',
'Category page' => 'Category page',
'Category title *' => 'Category title *',
'Cellphone' => 'Cellphone',
'Change password' => 'Change password',
'Check the total Cart amount in the given currency' => 'Check the total Cart amount in the given currency',
'City' => 'City',
'Combination builder' => 'Combination builder',
'Compagny' => 'Compagny',
'Company' => 'Company',
'Company Name' => 'Company Name',
'Conclusion' => 'Conclusion',
'Configuration' => 'Configuration',
'Confirm Email Address' => 'Confirm your e-mail address',
'Confirm Email address' => 'Confirm your e-mail address',
'Constant amount' => 'Constant amount',
'Constant amount found in one of the product\'s feature' => 'Constant amount found in one of the product\'s feature',
'Contact and sender email address' => 'Contact and sender email address',
'Contact page' => 'Contact page',
'Content' => 'Content',
'Content page' => 'Content page',
'Content title *' => 'Content title *',
'Contents' => 'Contents',
'Core hook of Thelia.' => 'Core hook of Thelia.',
'Countries' => 'Countries',
'Country' => 'Country',
'Country ID not found' => 'Country ID not found',
'Country area' => 'Country area',
'Country title *' => 'Country title *',
'Coupon' => 'Coupon',
'Coupon %code is expired.' => 'Coupon %code is expired.',
'Critical' => 'Critical',
'Curency selection page' => 'Curency selection page',
'Currencies' => 'Currencies',
'Currency' => 'Currency',
'Current Password' => 'Current Password',
'Customer' => 'Customer',
'Customer is %op% <strong>%customer_list%</strong>' => 'Customer is %op% <strong>%customer_list%</strong>',
'Customers' => 'Customers',
'Dashboard' => 'Dashboard',
'Date \'%date\' is invalid, please enter a valid date using %fmt format' => 'Date \'%date\' is invalid, please enter a valid date using %fmt format',
'Debug' => 'Debug',
'Decimal places' => 'Decimal places',
'Default folder *' => 'Default folder *',
'Default product category *' => 'Default product category *',
'Default product sale element' => 'Default product sale element',
'Deleting %obj% for %id% with parent id %parentId%' => 'Deleting %obj% for %id% with parent id %parentId%',
'Delivery' => 'Delivery',
'Delivery choice' => 'Delivery choice',
'Delivery country' => 'Delivery country',
'Delivery country is' => 'Delivery country is',
'Delivery module ID not found' => 'Delivery module ID not found',
'Delivery zone' => 'Delivery zone',
'Detailed description' => 'Detailed description',
'Disabled' => 'Disabled',
'Discount type' => 'Discount type',
'Display initial product prices on front-office' => 'Display initial product prices on front-office',
'Do compress' => 'Do compress',
'Document' => 'Document',
'Don\'t repeat keywords over and over in a row. Rather, put in keyword phrases.' => 'Don\'t repeat keywords over and over in a row. Rather, put in keyword phrases.',
'EAN Code' => 'EAN Code',
'Edit' => 'Edit',
'Edit JavaScript' => 'Edit JavaScript',
'Email Address' => 'Email Address',
'Email address' => 'Email address',
'Email addresses of notification recipients' => 'Email addresses of notification recipients',
'Email test from : %store%' => 'Email test from : %store%',
'Emergency' => 'Emergency',
'Enable remote SMTP use' => 'Enable remote SMTP use',
'Encryption' => 'Encryption',
'End date of sales' => 'End date of sales',
'Enter here the brand name in the default language (%title%)' => 'Enter here the brand name in the default language (%title%)',
'Enter here the mail template purpose in the default language (%title%)' => 'Enter here the mail template purpose in the default language (%title%)',
'Enter here the sale name in the default language (%title%)' => 'Enter here the sale name in the default language (%title%)',
'Equal to' => 'Equal to',
'Error during %action process : %error. Exception was %exc' => 'Error during %action process : %error. Exception was %exc',
'Error occured while processing order ref. %ref, ID %id: %err' => 'Error occured while processing order ref. %ref, ID %id: %err',
'Error while creating the directory "%directory"' => 'Error while creating the directory "%directory"',
'Error while writing the file into the archive, error message: %errmes' => 'Error while writing the file into the archive, error message: %errmes',
'Errors' => 'Errors',
'Errors occurred while importing the file: %errors' => 'Errors occurred while importing the file: %errors',
'Export' => 'Export',
'Fail to delete %obj% for %id% with parent id %parentId% (Exception : %e%)' => 'Fail to delete %obj% for %id% with parent id %parentId% (Exception : %e%)',
'Fail to update %type% position: %err%' => 'Fail to update %type% position: %err%',
'Fail to update %type% visibility: %err%' => 'Fail to update %type% visibility: %err%',
'Failed to create instance of module "%name%" when trying to delete module. Module directory has probably been deleted' => 'Failed to create instance of module "%name%" when trying to delete module. Module directory has probably been deleted',
'Failed to create new hook %s' => 'Failed to create new hook %s',
'Failed to deactivate hook with id %s' => 'Failed to deactivate hook with id %s',
'Failed to find a payment Module with ID=%mid for order ID=%oid' => 'Failed to find a payment Module with ID=%mid for order ID=%oid',
'Failed to load message with code \'%code%\', propably because it does\'nt exists.' => 'Failed to load message with code \'%code%\', propably because it does\'nt exists.',
'Failed to open a writing stream on the file: %file' => 'Failed to open a writing stream on the file: %file',
'Failed to open translation file %file. Please be sure that this file is writable by your Web server' => 'Failed to open translation file %file. Please be sure that this file is writable by your Web server',
'Failed to send message %code. Failed recipients: %failed_addresses' => 'Failed to send message %code. Failed recipients: %failed_addresses',
'Failed to update language definition: %ex' => 'Failed to update language definition: %ex',
'Fax' => 'Fax',
'Feature' => 'Feature',
'Feature value does not match FLOAT format' => 'Feature value does not match FLOAT format',
'Features' => 'Features',
'Features value' => 'Features value',
'File format' => 'File format',
'File is too large, please retry with a file having a size less than %size%.' => 'File is too large, please retry with a file having a size less than %size%.',
'File to upload' => 'File to upload',
'Files with the following extension are not allowed: %extension, please do an archive of the file if you want to upload it' => 'Files with the following extension are not allowed: %extension, please do an archive of the file if you want to upload it',
'First Name' => 'First Name',
'Firstname' => 'Firstname',
'Fixed Amount Discount' => 'Fixed Amount Discount',
'Fixed amount discount for selected attribute values' => 'Fixed amount discount for selected attribute values',
'Fixed amount discount for selected categories' => 'Fixed amount discount for selected categories',
'Fixed amount discount for selected products' => 'Fixed amount discount for selected products',
'Folder' => 'Folder',
'Folder page' => 'Folder page',
'Folder title *' => 'Folder title *',
'For one ore more customers' => 'For one ore more customers',
'Free product when buying one or more selected products' => 'Free product when buying one or more selected products',
'Front Office' => 'Front Office',
'Full Name' => 'Full Name',
'Greater than' => 'Greater than',
'Greater than or equals' => 'Greater than or equals',
'HTML Message' => 'HTML Message',
'HTML layout' => 'HTML layout',
'Home' => 'Home',
'Home page' => 'Home page',
'Hook' => 'Hook',
'Hook block' => 'Hook block',
'Hook code' => 'Hook code',
'Hook create form' => 'Hook create form',
'Hook title' => 'Hook title',
'Hooks' => 'Hooks',
'Host' => 'Host',
'I would like to receive the newsletter or the latest news.' => 'I would like to receive the newsletter or the latest news.',
'ISO 4217 code *' => 'ISO 4217 code *',
'ISO 639-1 Code' => 'ISO 639-1 Code',
'ISO Code *' => 'ISO Code *',
'If a translation is missing or incomplete :' => 'If a translation is missing or incomplete :',
'If cart item count is <strong>%operator%</strong> %quantity%' => 'If cart item count is <strong>%operator%</strong> %quantity%',
'If cart total amount is <strong>%operator%</strong> %amount% %currency%' => 'If cart total amount is <strong>%operator%</strong> %amount% %currency%',
'If checked, this hook will be used by a hook block. If not, by hook function.' => 'If checked, this hook will be used by a hook block. If not, by hook function.',
'Image' => 'Image',
'Import successfully done, %numb row(s) have been changed' => 'Import successfully done, %numb row(s) have been changed',
'Impossible to delete a customer who already have orders' => 'Impossible to delete a customer who already have orders',
'In' => 'In',
'Include documents' => 'Include documents',
'Include images' => 'Include images',
'Information' => 'Information',
'Invalid product_sale_elements' => 'Invalid product_sale_elements',
'Invalid value "%value" for "%param" parameter in loop type: %type, name: %name' => 'Invalid value "%value" for "%param" parameter in loop type: %type, name: %name',
'Invalid value for walkMode parameter: %value' => 'Invalid value for walkMode parameter: %value',
'Invoice' => 'Invoice',
'Invoice choice' => 'Invoice choice',
'Is it the default product sale element ?' => 'Is it the default product sale element?',
'It is not allowed to delete the default currency' => 'It is not allowed to delete the default currency',
'It is not allowed to delete the default language' => 'It is not allowed to delete the default language',
'JavaScript' => 'JavaScript',
'Keep the most important part of your description in the first 150-160 characters.' => 'Keep the most important part of your description in the first 150-160 characters.',
'Language' => 'Language',
'Language name' => 'Language name',
'Languages' => 'Languages',
'Last Name' => 'Last Name',
'Lastname' => 'Lastname',
'Layout' => 'Layout',
'Less than' => 'Less than',
'Less than or equals' => 'Less than or equals',
'Log format *' => 'Log format *',
'Log level *' => 'Log level *',
'Login' => 'Login',
'Login failed. Please check your username and password.' => 'Login failed. Please check your username and password.',
'Login page' => 'Login page',
'Logs' => 'Logs',
'Loop cannot implements multiple Search Interfaces : `PropelSearchLoopInterface`, `ArraySearchLoopInterface`' => 'Loop cannot implement multiple Search Interfaces: `PropelSearchLoopInterface`, `ArraySearchLoopInterface`',
'Loop must implements \'PropelSearchLoopInterface\' to be timestampable' => 'Loop must implement \'PropelSearchLoopInterface\' to be timestampable',
'Loop must implements \'PropelSearchLoopInterface\' to be versionable' => 'Loop must implement \'PropelSearchLoopInterface\' to be versionable',
'Loop must implements one of the following interfaces : `PropelSearchLoopInterface`, `ArraySearchLoopInterface`' => 'Loop must implement one of the following interfaces: `PropelSearchLoopInterface`, `ArraySearchLoopInterface`',
'Lost password' => 'Lost password',
'Mail template name' => 'Mail template name',
'Mail template purpose' => 'Mail template purpose',
'Mailing system' => 'Mailing system',
'Main address' => 'Main address',
'Main area' => 'Main area',
'Make sure it uses keywords found within the page itself.' => 'Make sure it uses keywords found within the page itself.',
'Make sure that your title is clear, and contains many of the keywords within the page itself.' => 'Make sure that your title is clear, and contains many of the keywords within the page itself.',
'Make this address as my primary address' => 'Make this address my primary address',
'Maximum usage count reached for coupon %code' => 'Maximum usage count reached for coupon %code',
'Message' => 'Message',
'Message name' => 'Message name',
'Message subject' => 'Message subject',
'Messages' => 'Messages',
'Meta Description' => 'Meta Description',
'Meta Keywords' => 'Meta Keywords',
'Method Name' => 'Method Name',
'Missing or invalid data: %s' => 'Missing or invalid data: %s',
'Module' => 'Module',
'Module "%name%" directory was not found' => 'Module "%name%" directory was not found',
'Module %name directory doesn\'t exists.' => 'Module %name directory doesn\'t exist.',
'Module %name should have a config.xml in the Config directory.' => 'Module %name should have a config.xml in the Config directory.',
'Module %name should have a module.xml in the Config directory.' => 'Module %name should have a module.xml in the Config directory.',
'Module ID not found' => 'Module ID not found',
'Module hook' => 'Module hook',
'Modules' => 'Modules',
'Name' => 'Name',
'Name *' => 'Name *',
'Name of the HTML layout file' => 'Name of the HTML layout file',
'Name of the HTML template file' => 'Name of the HTML template file',
'Name of the text layout file' => 'Name of the text layout file',
'Name of the text template file' => 'Name of the text template file',
'Native' => 'Native',
'New Password' => 'New Password',
'Newsletter page' => 'Newsletter page',
'No %obj was created.' => 'No %obj was created.',
'No %obj was updated.' => 'No %obj was updated.',
'No module found for code \'%item\'' => 'No module found for code \'%item\'',
'No, I am a new customer.' => 'No, I am a new customer.',
'Not equal to' => 'Not equal to',
'Not found' => 'Not found',
'Not in' => 'Not in',
'Notices' => 'Notices',
'Only files having the following mime type are allowed: %types%' => 'Only files having the following mime type are allowed: %types%',
'Only hookable modules are displayed in this menu.' => 'Only hookable modules are displayed in this menu.',
'Only if order billing country is %op% <strong>%countries_list%</strong>' => 'The order billing country is %op% <strong>%countries_list%</strong>',
'Only if order shipping country is %op% <strong>%countries_list%</strong>' => 'The order shipping country is %op% <strong>%countries_list%</strong>',
'Order' => 'Order',
'Order address ID not found' => 'Order address ID not found',
'Order failed' => 'Order failed',
'Order ref. %ref is now unpaid.' => 'Order ref. %ref is now unpaid.',
'Order ref. %ref, ID %id has been successfully paid.' => 'Order ref. %ref, ID %id has been successfully paid.',
'Orders' => 'Orders',
'Overall' => 'Overall',
'Page 404' => 'Page 404',
'Page Title' => 'Page Title',
'Parent category *' => 'Parent category *',
'Parent folder *' => 'Parent folder *',
'Password' => 'Password',
'Password *' => 'Password *',
'Password confirmation' => 'Password confirmation',
'Payment failed' => 'Payment failed',
'Payment gateway' => 'Payment gateway',
'Per customer' => 'Per customer',
'Percentage' => 'Percentage',
'Percentage discount for selected attribute values' => 'Percentage discount for selected attribute values',
'Percentage discount for selected categories' => 'Percentage discount for selected categories',
'Percentage discount for selected products' => 'Percentage discount for selected products',
'Percentage of the product price' => 'Percentage of the product price',
'Phone' => 'Phone',
'Placed order' => 'Placed order',
'Please accept the Terms and conditions in order to register.' => 'Please accept the Terms and conditions in order to register.',
'Please check your input: %error' => 'Please check your input: %error',
'Please enter your email address' => 'Please enter your email address',
'Please enter your password' => 'Please enter your password',
'Please select a category' => 'Please select a category',
'Please select an attribute' => 'Please select an attribute',
'Please select at least one attribute value' => 'Please select at least one attribute value',
'Please select at least one category' => 'Please select at least one category',
'Please select at least one product' => 'Please select at least one product',
'Please select the category of the offred product' => 'Please select the category of the offered product',
'Please select the offered product' => 'Please select the offered product',
'Please upload a valid Zip file' => 'Please upload a valid Zip file',
'Port' => 'Port',
'Postage' => 'Postage',
'Preferred locale' => 'Preferred locale',
'Prevent mailing template modification or deletion, except for super-admin' => 'Prevent mailing template modification or deletion, except for super-admin',
'Prevent variable modification or deletion, except for super-admin' => 'Prevent variable modification or deletion, except for super-admin',
'Price currency *' => 'Price currency *',
'Processing cancelation of payment for order ref. %ref' => 'Processing cancellation of payment for order ref. %ref',
'Processing confirmation of order ref. %ref, ID %id' => 'Processing confirmation of order ref. %ref, ID %id',
'Prodcut ID *' => 'Product ID *',
'Product' => 'Product',
'Product ID' => 'Product ID',
'Product ID *' => 'Product ID *',
'Product attributes' => 'Product attributes',
'Product base price excluding taxes *' => 'Product base price excluding taxes *',
'Product base price with taxes' => 'Product base price with taxes',
'Product categories' => 'Product categories',
'Product loop' => 'Product loop',
'Product page' => 'Product page',
'Product price excluding taxes' => 'Product price excluding taxes',
'Product price excluding taxes *' => 'Product price excluding taxes *',
'Product price including taxes' => 'Product price including taxes',
'Product price offset for each currency' => 'Product price offset for each currency',
'Product reference *' => 'Product reference *',
'Product sale element ID *' => 'Product sale element ID *',
'Product template' => 'Product template',
'Product title' => 'Product title',
'ProductSaleElement modification' => 'ProductSaleElement modification',
'Products' => 'Products',
'Profile' => 'Profile',
'Profile Code' => 'Profile Code',
'Profile ID not found' => 'Profile ID not found',
'Profile `code` already exists' => 'Profile `code` already exists',
'Profiles' => 'Profiles',
'Purpose' => 'Purpose',
'Purpose *' => 'Purpose *',
'Quantity' => 'Quantity',
'Rate from € *' => 'Rate from € *',
'Redirecting ...' => 'Redirecting...',
'Redirecting to %url' => 'Redirecting to %url',
'Reference' => 'Reference',
'Reference *' => 'Reference *',
'Register' => 'Register',
'Remember me ?' => 'Remember me?',
'Remove X percent to total cart' => 'Remove X percent to total cart',
'Replace by the default language' => 'Replace by the default language',
'Replace current document by this file' => 'Replace current document by this file',
'Replace current image by this file' => 'Replace current image by this file',
'Reseller' => 'Reseller',
'Rewriten URL' => 'Rewritten URL',
'Rotated Text File' => 'Rotated Text File',
'Sale announce label' => 'Sale announce label',
'Sale price excluding taxes' => 'Sale price excluding taxes',
'Sale price including taxes' => 'Sale price including taxes',
'Sale title' => 'Sale title',
'Saving %obj% for %parentName% parent id %parentId%' => 'Saving %obj% for %parentName% parent id %parentId%',
'Search' => 'Search',
'Search page' => 'Search page',
'Select the brand logo' => 'Select the brand logo',
'Select the brand logo amongst the brand images' => 'Select the brand logo amongst the brand images',
'Select the categories of the products covered by this operation' => 'Select the categories of the products covered by this operation',
'Select the countries to include in this shipping zone' => 'Select the countries to include in this shipping zone',
'Select the discount type that will be applied to original product prices' => 'Select the discount type that will be applied to original product prices',
'Select the product attributes included in this operation' => 'Select the product attributes included in this operation',
'Select the product brand, or supplier.' => 'Select the product brand, or supplier.',
'Select the products covered by this operation' => 'Select the products covered by this operation',
'Select the virtual document' => 'Select the virtual document',
'Service ID' => 'Service ID',
'Shipping configuration' => 'Shipping configuration',
'Shipping zone name' => 'Shipping zone name',
'Short additional text' => 'Short additional text',
'Short description text' => 'Short description text',
'Show redirections *' => 'Show redirections *',
'Sitemap' => 'Sitemap',
'Sorry, an error occured.' => 'Sorry, an error occurred.',
'Sorry, an error occured: %msg' => 'Sorry, an error occurred: %msg',
'Sorry, an error occured: %s' => 'Sorry, an error occurred: %s',
'Sorry, an error occurred: %err' => 'Sorry, an error occurred: %err',
'Sorry, you are not allowed to perform this action.' => 'Sorry, you are not allowed to perform this action.',
'Sorry, you\'re not allowed to perform this action' => 'Sorry, you\'re not allowed to perform this action',
'Source IP' => 'Source IP',
'Start date' => 'Validity start date',
'Start date of sales' => 'Start date of sales',
'Stats on %month/%year' => 'Stats on %month/%year',
'Store Business Identification Number (SIRET, etc).' => 'Store Business Identification Number (SIRET, etc).',
'Store Information' => 'Store Information',
'Store configuration failed.' => 'Store configuration failed.',
'Store description' => 'Store description',
'Store email address' => 'Store email address',
'Store logs into text file' => 'Store logs into text file',
'Store logs into text file, up to a certian size, then a new file is created' => 'Store logs into text file, up to a certain size, then a new file is created',
'Store name' => 'Store name',
'Street Address' => 'Street Address',
'Street Address ' => 'Street Address ',
'Strictly use the requested language' => 'Strictly use the requested language',
'Subject' => 'Subject',
'Summary' => 'Summary',
'Symbol *' => 'Symbol *',
'System' => 'System',
'System configuration' => 'System configuration',
'System log configuration failed.' => 'System log configuration failed.',
'Tax' => 'Tax',
'Tax ID not found in tax list JSON' => 'Tax ID not found in tax list JSON',
'Tax list is not valid JSON' => 'Tax list is not valid JSON',
'Tax rule ID not found' => 'Tax rule ID not found',
'Tax rule for this product *' => 'Tax rule for this product *',
'Taxes' => 'Taxes',
'Taxes rules' => 'Taxes rules',
'Template' => 'Template',
'Template Name *' => 'Template Name *',
'Templates' => 'Templates',
'Text File' => 'Text File',
'Text Message' => 'Text Message',
'The %name module definition has not been initialized.' => 'The %name module definition has not been initialized.',
'The %name module descriptor has not been initialized.' => 'The %name module descriptor has not been initialized.',
'The %obj_name id \'%id\' doesn\'t exist' => 'The %obj_name id \'%id\' doesn\'t exist',
'The HTML TITLE element is the most important element on your web page.' => 'The HTML TITLE element is the most important element on your web page.',
'The TaxEngine should be passed to this form before using it.' => 'The TaxEngine should be passed to this form before using it.',
'The archive builder "%name" doesn\'t exist' => 'The archive builder "%name" doesn\'t exist',
'The brand name or title' => 'The brand name or title',
'The cache directory "%env" is not writable' => 'The cache directory "%env" is not writable',
'The cache file %file is not readable' => 'The cache file %file is not readable',
'The cart item count should match the condition' => 'The cart item count should match the condition',
'The changes could on the Zip Archive not be commited' => 'The changes on the Zip Archive could not be committed',
'The class "%class" doesn\'t exist' => 'The class "%class" doesn\'t exist',
'The class "%class" must be a subclass of %baseClass' => 'The class "%class" must be a subclass of %baseClass',
'The class "%class" must extend %baseClass' => 'The class "%class" must extend %baseClass',
'The column %column that you want to sort doesn\'t exist' => 'The column %column that you want to sort doesn\'t exist',
'The content is not correct' => 'The content is not correct',
'The coupon applies if the cart contains at least one product of the selected categories' => 'The coupon applies if the cart contains at least one product of the selected categories',
'The coupon applies if the cart contains at least one product of the specified product list' => 'The coupon applies if the cart contains at least one product of the specified product list',
'The coupon applies to some customers only' => 'The coupon applies to some customers only',
'The coupon applies to the selected billing countries' => 'The coupon applies to the selected billing countries',
'The coupon applies to the selected delivery countries' => 'The coupon applies to the selected delivery countries',
'The coupon code \'%code\' already exist. Please choose another coupon code' => 'The coupon code \'%code\' already exists. Please choose another coupon code',
'The coupon code \'%code\' already exists. Please choose another coupon code' => 'The coupon code \'%code\' already exists. Please choose another coupon code',
'The coupon is valid after a given date' => 'The coupon is only valid from a given date',
'The date after which sales are de-activated. Please use %fmt format.' => 'The date after which sales are de-activated. Please use %fmt format.',
'The date from which sales are active. Please use %fmt format.' => 'The date from which sales are active. Please use %fmt format.',
'The detailed description.' => 'The detailed description.',
'The directory %dir has not been created in the archive' => 'The directory %dir has not been created in the archive',
'The extension "%ext" is not allowed' => 'The extension "%ext" is not allowed',
'The file %file has not been deleted' => 'The file %file has not been deleted',
'The file %file is missing or is not readable' => 'The file %file is missing or is not readable',
'The file %file is not readable' => 'The file %file is not readable',
'The file %path has been successfully downloaded' => 'The file %path has been successfully downloaded',
'The file name must be valid' => 'The file name must be valid',
'The filename is not correct' => 'The filename is not correct',
'The following columns are missing: %columns' => 'The following columns are missing: %columns',
'The formatter "%name" doesn\'t exist' => 'The formatter "%name" doesn\'t exist',
'The image which replaces an undefined country flag (%file) was not found. Please check unknown-flag-path configuration variable, and check that the image exists.' => 'The image which replaces an undefined country flag (%file) was not found. Please check unknown-flag-path configuration variable, and check that the image exists.',
'The language "%id" doesn\'t exist' => 'The language "%id" doesn\'t exist',
'The loop "%loop" doesn\'t exist' => 'The loop "%loop" doesn\'t exist',
'The mailing template in HTML format.' => 'The mailing template in HTML format.',
'The mailing template in text-only format.' => 'The mailing template in text-only format.',
'The method "%class"::buildDataSet must return an array or a ModelCriteria' => 'The method "%class"::buildDataSet must return an array or a ModelCriteria',
'The method name that will handle the hook event.' => 'The method name that will handle the hook event.',
'The module "%name%" is currently in use by at least one order, and can\'t be deleted.' => 'The module "%name%" is currently in use by at least one order, and can\'t be deleted.',
'The module %module has been installed successfully.' => 'The module %module has been installed successfully.',
'The module %name is already installed in the same or greater version.' => 'The module %name is already installed in the same or greater version.',
'The module %name requires Thelia %version or newer' => 'The module %name requires Thelia %version or newer',
'The module has to be activated.' => 'The module has to be activated.',
'The module is not valid : %message' => 'The module is not valid : %message',
'The module zip file' => 'The module zip file',
'The product document id %id doesn\'t exists' => 'The product document id %id doesn\'t exist',
'The product image id %id doesn\'t exists' => 'The product image id %id doesn\'t exist',
'The product sale element id %id doesn\'t exist' => 'The product sale element id %id doesn\'t exist',
'The product sale element reference %id doesn\'t exist' => 'The product sale element reference %id doesn\'t exist',
'The product sale elements id %id doesn\'t exist' => 'The product sale elements id %id doesn\'t exist',
'The product sale elements id %id doesn\'t exists' => 'The product sale elements id %id doesn\'t exist',
'The resource %res has not been found' => 'The resource %res has not been found',
'The sale announce label, such as Sales ! or Flash Sales !' => 'The sale announce label, such as Sales! or Flash Sales!',
'The sale name or descriptive title' => 'The sale name or descriptive title',
'The sale name or title' => 'The sale name or title',
'The service id that will handle the hook (defined in the config.xml file of the module).' => 'The service id that will handle the hook (defined in the config.xml file of the module).',
'The store fax number.' => 'The store fax number.',
'The store phone number.' => 'The store phone number.',
'The type %type is not valid' => 'The type %type is not valid',
'There are no files to associate.' => 'There are no files to associate.',
'There is no id "%id" in the export categories' => 'There is no id "%id" in the export categories',
'There is no id "%id" in the exports' => 'There is no id "%id" in the exports',
'There is no id "%id" in the import categories' => 'There is no id "%id" in the import categories',
'There is no id "%id" in the imports' => 'There is no id "%id" in the imports',
'There\'s a conflict between your file extension "%ext" and the mime type "%mime"' => 'There\'s a conflict between your file extension "%ext" and the mime type "%mime"',
'There\'s a problem, the extension "%ext" has been found, but has no formatters nor archive builder' => 'There\'s a problem, the extension "%ext" has been found, but has no formatters nor archive builder',
'This PHP extension should be installed and loaded.' => 'This PHP extension should be installed and loaded.',
'This brand is online' => 'This brand is online',
'This category is online.' => 'This category is online.',
'This condition is always true' => 'This condition is always true',
'This content is online.' => 'This content is online.',
'This coupon adds a free product to the cart if one of the selected products is in the cart.' => 'This coupon adds a free product to the cart if one of the selected products is in the cart.',
'This coupon does not exists' => 'This coupon does not exist',
'This coupon subtracts from the order total a percentage of the price of each product which belongs to the selected categories. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts from the order total a percentage of the price of each product which belongs to the selected categories. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon subtracts from the order total the specified percentage of each product price which uses the selected attribute values. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts from the order total the specified percentage of each product price which uses the selected attribute values. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon subtracts from the order total the specified percentage of each selected product price. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts from the order total the specified percentage of each selected product price. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon subtracts the specified amount from the order total for each product which belongs to the selected categories. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts the specified amount from the order total for each product which belongs to the selected categories. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon subtracts the specified amount from the order total for each product which uses the selected attribute values. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts the specified amount from the order total for each product which uses the selected attribute values. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon subtracts the specified amount from the order total for each selected product. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon subtracts the specified amount from the order total for each selected product. If the discount is greater than the total order, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon will offert a flat percentage off a shopper\'s entire order (not applied to shipping costs or tax rates). If the discount is greater than the total order corst, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon will offer a flat percentage off a shopper\'s entire order (not applied to shipping costs or tax rates). If the discount is greater than the total order cost, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This coupon will subtracts a set amount from the total cost of an order. If the discount is greater than the total order corst, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.' => 'This coupon will subtract a set amount from the total cost of an order. If the discount is greater than the total order cost, the customer will only pay the shipping, or nothing if the coupon also provides free shipping.',
'This document is online' => 'This document is online',
'This email already exists.' => 'This email already exists.',
'This email does not exists' => 'This email does not exist',
'This folder is online.' => 'This folder is online.',
'This hook is specific to a module (delivery/payment modules).' => 'This hook is specific to a module (delivery/payment modules).',
'This image is online' => 'This image is online',
'This is a comma separated list of email addresses where store notifications (such as order placed) are sent.' => 'This is a comma separated list of email addresses where store notifications (such as order placed) are sent.',
'This is an identifier that will be used in the code to get this message' => 'This is an identifier that will be used in the code to get this message',
'This is the contact email address, and the sender email of all e-mails sent by your store.' => 'This is the contact email address, and the sender email of all e-mails sent by your store.',
'This is the message purpose, such as \'Order confirmation\'.' => 'This is the message purpose, such as \'Order confirmation\'.',
'This is the subject of the e-mail, such as \'Your order is confirmed\'.' => 'This is the subject of the e-mail, such as \'Your order is confirmed\'.',
'This login already exists' => 'This login already exists',
'This product does not have a physical presence' => 'This product does not have a physical presence',
'This product is on sale' => 'This product is on sale',
'This product is online' => 'This product is online',
'This product_sale_elements_id does not exists for this product : %d' => 'This product_sale_elements_id does not exist for this product: %d',
'This template is in use in some of your products, and cannot be deleted. Delete it from all your products and try again.' => 'This template is in use in some of your products, and cannot be deleted. Delete it from all your products and try again.',
'This the unique name of this message. Do not change this value unless you understand what you do.' => 'This is the unique name of this message. Do not change this value unless you understand what you are doing.',
'This value should not be blank.' => 'This value should not be blank.',
'Timeout' => 'Timeout',
'Title' => 'Title',
'Title *' => 'Title *',
'Title ID not found' => 'Title ID not found',
'To activate module %name, the following modules should be activated first: %modules' => 'To activate module %name, the following modules should be activated first: %modules',
'Tools' => 'Tools',
'Translations' => 'Translations',
'Tried to download a file, but the URL was not valid: %url' => 'Tried to download a file, but the URL was not valid: %url',
'Type' => 'Type',
'URL_ALREADY_EXISTS' => 'This URL already exists',
'Unable to get module code from the fullnamespace element of the module descriptor: \'%val\'' => 'Unable to get module code from the fullnamespace element of the module descriptor: \'%val\'',
'Unable to process your request. Please try again (%err).' => 'Unable to process your request. Please try again (%err).',
'Unable to write the file %file into the archive' => 'Unable to write the file %file into the archive',
'Unavailable' => 'Unavailable',
'Unconditional usage' => 'Unconditional usage',
'Unconditionnal usage' => 'Unconditional usage',
'Undefined loop argument "%name"' => 'Undefined loop argument "%name"',
'Undefined search mode \'%mode\'' => 'Undefined search mode \'%mode\'',
'Unknown error while deleting the file %file' => 'Unknown error while deleting the file %file',
'Unknown order ID: %id' => 'Unknown order ID: %id',
'Unsupported magic method %name. only getArgname() is supported.' => 'Unsupported magic method %name. Only getArgname() is supported.',
'Unsupported type' => 'Unsupported type',
'Update customer account' => 'Update customer account',
'Use the keyword phrase in your URL.' => 'Use the keyword phrase in your URL.',
'Used in your store front' => 'Used in your store front',
'Username' => 'Username',
'Username *' => 'Username *',
'Valid only from %date% to the coupon expiration date' => 'Valid from %date% until the coupon expiration date',
'Value' => 'Value',
'Value %val for Discount Amount is invalid. Please enter a positive value.' => 'Value %val for Discount Amount is invalid. Please enter a positive value.',
'Value %val for Percent Discount is invalid. Please enter a positive value between 1 and 100.' => 'Value %val for Percent Discount is invalid. Please enter a positive value between 1 and 100.',
'Value *' => 'Value *',
'Value create form' => 'Value create form',
'Variable' => 'Variable',
'Variables' => 'Variables',
'Virtual document' => 'Virtual document',
'Warnings' => 'Warnings',
'We\'re sorry, this PDF invoice is not available at the moment.' => 'We\'re sorry, this PDF invoice is not available at the moment.',
'Weight' => 'Weight',
'Wrong form method, %s expected.' => 'Wrong form method, %s expected.',
'Yes, I have a password :' => 'Yes, I have a password :',
'You are already registered!' => 'You are already registered!',
'You don\'t need to use commas or other punctuations.' => 'You don\'t need to use commas or other punctuations.',
'You have to configure your store email first !' => 'You have to configure your store email first!',
'You must define an environment when you use an archive builder' => 'You must define an environment when you use an archive builder',
'You tried to load a bad formatted XML' => 'You tried to load a badly formatted XML',
'You\'ve submitted this form too many times. ' => 'You\'ve submitted this form too many times. ',
'Your Email Address' => 'Your Email Address',
'Your Message' => 'Your Message',
'Your archive must contain one of these file and doesn\'t: %files' => 'Your archive must contain one of these files and doesn\'t: %files',
'Your configuration seems to be ok. Checked out your mailbox : %email%' => 'Your configuration seems to be ok. Check your mailbox: %email%',
'Your current password does not match.' => 'Your current password does not match.',
'Your zip must contain 1 root directory which is the root folder directory of your module' => 'Your zip must contain 1 root directory which is the root folder directory of your module',
'Zip Error' => 'Zip Error',
'Zip code' => 'Zip code',
'Zone' => 'Zone',
'[%zip_head] ' => '[%zip_head] ',
'accessories table header' => 'accessories table header',
'accessories table row' => 'accessories table row',
'add to all form' => 'add to all form',
'additional information' => 'additional information',
'address create form' => 'address create form',
'address delete form' => 'address delete form',
'address update form' => 'address update form',
'after addresse area' => 'after address area',
'after combinations' => 'after combinations',
'after footer' => 'after footer',
'after javascript include' => 'after javascript include',
'after javascript initialisation' => 'after javascript initialisation',
'after product listing' => 'after product listing',
'after the information area' => 'after the information area',
'after the main content area' => 'after the main content area',
'after the opening of the body tag' => 'after the opening of the body tag',
'after the opening of the head tag' => 'after the opening of the head tag',
'after the order summary' => 'after the order summary',
'after top bar' => 'after top bar',
'after top menu' => 'after top menu',
'api id %id does not exists' => 'api id %id does not exist',
'at the bottom' => 'at the bottom',
'at the bottom of column 1' => 'at the bottom of column 1',
'at the bottom of information area' => 'at the bottom of information area',
'at the bottom of the catalog' => 'at the bottom of the catalog',
'at the bottom of the detail area' => 'at the bottom of the detail area',
'at the bottom of the footer' => 'at the bottom of the footer',
'at the bottom of the form' => 'at the bottom of the form',
'at the bottom of the header' => 'at the bottom of the header',
'at the bottom of the main area' => 'at the bottom of the main area',
'at the bottom of the shipping area' => 'at the bottom of the shipping area',
'at the bottom of the sidebar' => 'at the bottom of the sidebar',
'at the bottom of the system area' => 'at the bottom of the system area',
'at the bottom of the top bar' => 'at the bottom of the top bar',
'at the top' => 'at the top',
'at the top of the catalog area' => 'at the top of the catalog area',
'at the top of the column' => 'at the top of the column',
'at the top of the configuration' => 'at the top of the configuration',
'at the top of the detail' => 'at the top of the detail',
'at the top of the footer' => 'at the top of the footer',
'at the top of the form' => 'at the top of the form',
'at the top of the header' => 'at the top of the header',
'at the top of the main area' => 'at the top of the main area',
'at the top of the shipping area' => 'at the top of the shipping area',
'at the top of the sidebar' => 'at the top of the sidebar',
'at the top of the system area' => 'at the top of the system area',
'at the top of the top bar' => 'at the top of the top bar',
'attributes table header' => 'attributes table header',
'attributes table row' => 'attributes table row',
'before combinations' => 'before combinations',
'before footer' => 'before footer',
'before the end body tag' => 'before the end body tag',
'before the end of the head tag' => 'before the end of the head tag',
'before the main content area' => 'before the main content area',
'before top menu' => 'before top menu',
'before topbar' => 'before topbar',
'block' => 'block',
'bottom' => 'bottom',
'bottom of the footer' => 'bottom of the footer',
'cURL errno %errno, http code %http_code on link "%path": %error' => 'cURL errno %errno, http code %http_code on link "%path": %error',
'caption' => 'caption',
'categories table header' => 'categories table header',
'categories table row' => 'categories table row',
'combination delete form' => 'combination delete form',
'combinations list caption' => 'combinations list caption',
'configuration' => 'configuration',
'configuration JavaScript' => 'configuration JavaScript',
'configuration bottom' => 'configuration bottom',
'content' => 'content',
'content area' => 'content area',
'content delete form' => 'content delete form',
'content edit JavaScript' => 'content edit JavaScript',
'contents table header' => 'contents table header',
'contents table row' => 'contents table row',
'context' => 'context',
'country delete form' => 'country delete form',
'create JavaScript' => 'create JavaScript',
'create form' => 'create form',
'customer account' => 'customer account',
'date format' => 'date format',
'decimal separator' => 'decimal separator',
'delete form' => 'delete form',
'delivery address' => 'delivery address',
'delivery module %s is not a Thelia\Module\DeliveryModuleInterface' => 'delivery module %s is not a Thelia\Module\DeliveryModuleInterface',
'details pricing form' => 'details pricing form',
'details promotion form' => 'details promotion form',
'email' => 'email',
'email confirmation is not the same as email field' => 'The two email addresses are not identical',
'email system JavaScript' => 'email system JavaScript',
'extra area' => 'extra area',
'extra payment zone' => 'extra payment zone',
'features table row' => 'features table row',
'features-table-header' => 'features-table-header',
'folders table header' => 'folders table header',
'folders table row' => 'folders table row',
'footer body' => 'footer body',
'header' => 'header',
'hook delete form' => 'hook delete form',
'hook edit JavaScript' => 'hook edit JavaScript',
'hour(s)' => 'hour(s)',
'id delete form' => 'id delete form',
'if successful response' => 'if successful response',
'imprint' => 'imprint',
'in footer' => 'in footer',
'in the header' => 'in the header',
'in the menu catalog' => 'in the menu catalog',
'in the menu configuration' => 'in the menu configuration',
'in the menu customers' => 'in the menu customers',
'in the menu folders' => 'in the menu folders',
'in the menu modules' => 'in the menu modules',
'in the menu orders' => 'in the menu orders',
'in the menu tools' => 'in the menu tools',
'in top menu items' => 'in top menu items',
'inside top bar' => 'inside top bar',
'javascript' => 'javascript',
'javascript initialization' => 'javascript initialization',
'label' => 'label',
'language locale' => 'language locale',
'language selection page' => 'language selection page',
'list JavaScript' => 'list JavaScript',
'list caption' => 'list caption',
'list header' => 'list header',
'list row' => 'list row',
'logs JavaScript' => 'logs JavaScript',
'mailing system modification' => 'mailing system modification',
'main area' => 'main area',
'middle' => 'middle',
'minute(s)' => 'minute(s)',
'password confirmation is not the same as password field' => 'password confirmation is not the same as password field',
'password must be composed of at least 4 characters' => 'password must be composed of at least 4 characters',
'payment module %s is not a Thelia\Module\PaymentModuleInterface' => 'payment module %s is not a Thelia\Module\PaymentModuleInterface',
'pdf' => 'pdf',
'permanent discount (in percent)' => 'permanent discount (in percent)',
'photo gallery' => 'photo gallery',
'plain, login, cram-md5 or empty' => 'plain, login, cram-md5 or empty',
'primary navigation' => 'primary navigation',
'product list' => 'product list',
'quantity value is not valid' => 'quantity value is not valid',
'remove to all form' => 'remove to all form',
'row' => 'row',
'rule create form' => 'rule create form',
'rule delete form' => 'rule delete form',
'rule edit JavaScript' => 'rule edit JavaScript',
'secondary navigation' => 'secondary navigation',
'ssl, tls or empty' => 'ssl, tls or empty',
'stock edit form' => 'stock edit form',
'table header' => 'table header',
'table row' => 'table row',
'tax rule' => 'tax rule',
'the body of the sidebar' => 'the body of the sidebar',
'this product id does not exists : %d' => 'this product id does not exist: %d',
'thousands separator' => 'thousands separator',
'time format' => 'time format',
'type' => 'type',
'update JavaScript' => 'update JavaScript',
'update form' => 'update form',
'value table header' => 'value table header',
'value table row' => 'value table row',
);
idx: 1 | id: 10,951 | review: Owww, I think I found a typo. `suche` | project: thelia-thelia | language: php
@@ -131,8 +131,6 @@ void search(SearchEngineData<Algorithm> &engine_working_data,
source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
- BOOST_ASSERT(forward_heap.Size() > 0);
- BOOST_ASSERT(reverse_heap.Size() > 0);
search(engine_working_data,
facade, | 1 | #include "engine/routing_algorithms/shortest_path.hpp"
#include "engine/routing_algorithms/routing_base_ch.hpp"
#include "engine/routing_algorithms/routing_base_mld.hpp"
#include <boost/assert.hpp>
#include <boost/optional.hpp>
#include <memory>
namespace osrm
{
namespace engine
{
namespace routing_algorithms
{
namespace
{
const static constexpr bool DO_NOT_FORCE_LOOP = false;
// allows a uturn at the target_phantom
// searches source forward/reverse -> target forward/reverse
template <typename Algorithm>
void searchWithUTurn(SearchEngineData<Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<Algorithm> &facade,
typename SearchEngineData<Algorithm>::QueryHeap &forward_heap,
typename SearchEngineData<Algorithm>::QueryHeap &reverse_heap,
const bool search_from_forward_node,
const bool search_from_reverse_node,
const bool search_to_forward_node,
const bool search_to_reverse_node,
const PhantomNode &source_phantom,
const PhantomNode &target_phantom,
const int total_weight_to_forward,
const int total_weight_to_reverse,
int &new_total_weight,
std::vector<NodeID> &leg_packed_path)
{
forward_heap.Clear();
reverse_heap.Clear();
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
-source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
-source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
if (search_to_forward_node)
{
reverse_heap.Insert(target_phantom.forward_segment_id.id,
target_phantom.GetForwardWeightPlusOffset(),
target_phantom.forward_segment_id.id);
}
if (search_to_reverse_node)
{
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
target_phantom.GetReverseWeightPlusOffset(),
target_phantom.reverse_segment_id.id);
}
    // this is only relevant if source and target are on the same compressed edge
auto is_oneway_source = !(search_from_forward_node && search_from_reverse_node);
auto is_oneway_target = !(search_to_forward_node && search_to_reverse_node);
// we only enable loops here if we can't search from forward to backward node
auto needs_loop_forwards = is_oneway_source && needsLoopForward(source_phantom, target_phantom);
auto needs_loop_backwards =
is_oneway_target && needsLoopBackwards(source_phantom, target_phantom);
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight,
leg_packed_path,
needs_loop_forwards,
needs_loop_backwards,
{source_phantom, target_phantom});
    // if no route is found between two parts of the via-route, the entire route
    // becomes invalid. Adding a weight to INVALID_EDGE_WEIGHT sadly does not
    // yield an invalid edge weight again. Here we prevent the possible overflow
    // by faking infinity + x == infinity
if (new_total_weight != INVALID_EDGE_WEIGHT)
new_total_weight += std::min(total_weight_to_forward, total_weight_to_reverse);
}
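
// A minimal sketch of the "infinity + x == infinity" trick used at the end of
// searchWithUTurn above. The helper below is hypothetical (not part of this
// file); it only illustrates the saturating addition that keeps
// INVALID_EDGE_WEIGHT from overflowing:
//
//   inline int addWeightSaturating(int weight, int delta)
//   {
//       return weight == INVALID_EDGE_WEIGHT ? INVALID_EDGE_WEIGHT : weight + delta;
//   }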
// searches shortest path between:
// source forward/reverse -> target forward
// source forward/reverse -> target reverse
template <typename Algorithm>
void search(SearchEngineData<Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<Algorithm> &facade,
typename SearchEngineData<Algorithm>::QueryHeap &forward_heap,
typename SearchEngineData<Algorithm>::QueryHeap &reverse_heap,
const bool search_from_forward_node,
const bool search_from_reverse_node,
const bool search_to_forward_node,
const bool search_to_reverse_node,
const PhantomNode &source_phantom,
const PhantomNode &target_phantom,
const int total_weight_to_forward,
const int total_weight_to_reverse,
int &new_total_weight_to_forward,
int &new_total_weight_to_reverse,
std::vector<NodeID> &leg_packed_path_forward,
std::vector<NodeID> &leg_packed_path_reverse)
{
if (search_to_forward_node)
{
forward_heap.Clear();
reverse_heap.Clear();
reverse_heap.Insert(target_phantom.forward_segment_id.id,
target_phantom.GetForwardWeightPlusOffset(),
target_phantom.forward_segment_id.id);
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
total_weight_to_forward -
source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
total_weight_to_reverse -
source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
BOOST_ASSERT(forward_heap.Size() > 0);
BOOST_ASSERT(reverse_heap.Size() > 0);
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight_to_forward,
leg_packed_path_forward,
needsLoopForward(source_phantom, target_phantom),
routing_algorithms::DO_NOT_FORCE_LOOP,
{source_phantom, target_phantom});
}
if (search_to_reverse_node)
{
forward_heap.Clear();
reverse_heap.Clear();
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
target_phantom.GetReverseWeightPlusOffset(),
target_phantom.reverse_segment_id.id);
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
total_weight_to_forward -
source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
total_weight_to_reverse -
source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
BOOST_ASSERT(forward_heap.Size() > 0);
BOOST_ASSERT(reverse_heap.Size() > 0);
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight_to_reverse,
leg_packed_path_reverse,
routing_algorithms::DO_NOT_FORCE_LOOP,
needsLoopBackwards(source_phantom, target_phantom),
{source_phantom, target_phantom});
}
}
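
// Splits the packed node path of the winning route back into per-leg
// segments, unpacks each segment via unpackPath, and records for every leg
// whether its source and target phantom nodes were traversed in reverse.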
template <typename Algorithm>
void unpackLegs(const datafacade::ContiguousInternalMemoryDataFacade<Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const std::vector<NodeID> &total_packed_path,
const std::vector<std::size_t> &packed_leg_begin,
const EdgeWeight shortest_path_weight,
InternalRouteResult &raw_route_data)
{
raw_route_data.unpacked_path_segments.resize(packed_leg_begin.size() - 1);
raw_route_data.shortest_path_weight = shortest_path_weight;
for (const auto current_leg : util::irange<std::size_t>(0UL, packed_leg_begin.size() - 1))
{
auto leg_begin = total_packed_path.begin() + packed_leg_begin[current_leg];
auto leg_end = total_packed_path.begin() + packed_leg_begin[current_leg + 1];
const auto &unpack_phantom_node_pair = phantom_nodes_vector[current_leg];
unpackPath(facade,
leg_begin,
leg_end,
unpack_phantom_node_pair,
raw_route_data.unpacked_path_segments[current_leg]);
raw_route_data.source_traversed_in_reverse.push_back(
(*leg_begin != phantom_nodes_vector[current_leg].source_phantom.forward_segment_id.id));
raw_route_data.target_traversed_in_reverse.push_back(
(*std::prev(leg_end) !=
phantom_nodes_vector[current_leg].target_phantom.forward_segment_id.id));
}
}
}
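
// Computes the shortest via-route through all legs in phantom_nodes_vector,
// allowing or forbidding u-turns at intermediate waypoints depending on
// continue_straight_at_waypoint (falling back to the facade default), and
// unpacks the winning chain of packed legs into an InternalRouteResult.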
template <typename Algorithm>
InternalRouteResult
shortestPathSearch(SearchEngineData<Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint)
{
InternalRouteResult raw_route_data;
raw_route_data.segment_end_coordinates = phantom_nodes_vector;
const bool allow_uturn_at_waypoint =
!(continue_straight_at_waypoint ? *continue_straight_at_waypoint
: facade.GetContinueStraightDefault());
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
auto &forward_heap = *engine_working_data.forward_heap_1;
auto &reverse_heap = *engine_working_data.reverse_heap_1;
int total_weight_to_forward = 0;
int total_weight_to_reverse = 0;
bool search_from_forward_node =
phantom_nodes_vector.front().source_phantom.IsValidForwardSource();
bool search_from_reverse_node =
phantom_nodes_vector.front().source_phantom.IsValidReverseSource();
std::vector<NodeID> prev_packed_leg_to_forward;
std::vector<NodeID> prev_packed_leg_to_reverse;
std::vector<NodeID> total_packed_path_to_forward;
std::vector<std::size_t> packed_leg_to_forward_begin;
std::vector<NodeID> total_packed_path_to_reverse;
std::vector<std::size_t> packed_leg_to_reverse_begin;
std::size_t current_leg = 0;
// this implements a dynamic program that finds the shortest route through
// a list of vias
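    // A rough sketch of that recurrence (illustrative notation only, not code
    // from this file):
    //
    //   best[leg][t] = min over s in {forward, reverse} of
    //                  best[leg - 1][s] + weight(s -> t)
    //
    // where t ranges over the forward/reverse target phantom nodes of each leg
    // and unreachable endpoints carry INVALID_EDGE_WEIGHT.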
for (const auto &phantom_node_pair : phantom_nodes_vector)
{
int new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
int new_total_weight_to_reverse = INVALID_EDGE_WEIGHT;
std::vector<NodeID> packed_leg_to_forward;
std::vector<NodeID> packed_leg_to_reverse;
const auto &source_phantom = phantom_node_pair.source_phantom;
const auto &target_phantom = phantom_node_pair.target_phantom;
bool search_to_forward_node = target_phantom.IsValidForwardTarget();
bool search_to_reverse_node = target_phantom.IsValidReverseTarget();
BOOST_ASSERT(!search_from_forward_node || source_phantom.IsValidForwardSource());
BOOST_ASSERT(!search_from_reverse_node || source_phantom.IsValidReverseSource());
if (search_to_reverse_node || search_to_forward_node)
{
if (allow_uturn_at_waypoint)
{
searchWithUTurn(engine_working_data,
facade,
forward_heap,
reverse_heap,
search_from_forward_node,
search_from_reverse_node,
search_to_forward_node,
search_to_reverse_node,
source_phantom,
target_phantom,
total_weight_to_forward,
total_weight_to_reverse,
new_total_weight_to_forward,
packed_leg_to_forward);
                // if only the reverse node is a valid target (e.g. when using the match
                // plugin) we need to move the forward results into the reverse slots
if (!target_phantom.IsValidForwardTarget())
{
BOOST_ASSERT(target_phantom.IsValidReverseTarget());
new_total_weight_to_reverse = new_total_weight_to_forward;
packed_leg_to_reverse = std::move(packed_leg_to_forward);
new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
// (*)
//
// Below we have to check if new_total_weight_to_forward is invalid.
// This prevents use-after-move on packed_leg_to_forward.
}
else if (target_phantom.IsValidReverseTarget())
{
new_total_weight_to_reverse = new_total_weight_to_forward;
packed_leg_to_reverse = packed_leg_to_forward;
}
}
else
{
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
search_from_forward_node,
search_from_reverse_node,
search_to_forward_node,
search_to_reverse_node,
source_phantom,
target_phantom,
total_weight_to_forward,
total_weight_to_reverse,
new_total_weight_to_forward,
new_total_weight_to_reverse,
packed_leg_to_forward,
packed_leg_to_reverse);
}
}
// Note: To make sure we do not access the moved-from packed_leg_to_forward
// we guard its access by a check for invalid edge weight. See (*) above.
// No path found for both target nodes?
if ((INVALID_EDGE_WEIGHT == new_total_weight_to_forward) &&
(INVALID_EDGE_WEIGHT == new_total_weight_to_reverse))
{
return raw_route_data;
}
// we need to figure out how the new legs connect to the previous ones
if (current_leg > 0)
{
bool forward_to_forward =
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
packed_leg_to_forward.front() == source_phantom.forward_segment_id.id;
bool reverse_to_forward =
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
packed_leg_to_forward.front() == source_phantom.reverse_segment_id.id;
bool forward_to_reverse =
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
packed_leg_to_reverse.front() == source_phantom.forward_segment_id.id;
bool reverse_to_reverse =
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
packed_leg_to_reverse.front() == source_phantom.reverse_segment_id.id;
BOOST_ASSERT(!forward_to_forward || !reverse_to_forward);
BOOST_ASSERT(!forward_to_reverse || !reverse_to_reverse);
// in this case we always need to copy
if (forward_to_forward && forward_to_reverse)
{
// in this case we copy the path leading to the source forward node
// and change the case
total_packed_path_to_reverse = total_packed_path_to_forward;
packed_leg_to_reverse_begin = packed_leg_to_forward_begin;
forward_to_reverse = false;
reverse_to_reverse = true;
}
else if (reverse_to_forward && reverse_to_reverse)
{
total_packed_path_to_forward = total_packed_path_to_reverse;
packed_leg_to_forward_begin = packed_leg_to_reverse_begin;
reverse_to_forward = false;
forward_to_forward = true;
}
BOOST_ASSERT(!forward_to_forward || !forward_to_reverse);
BOOST_ASSERT(!reverse_to_forward || !reverse_to_reverse);
// in this case we just need to swap to regain the correct mapping
if (reverse_to_forward || forward_to_reverse)
{
total_packed_path_to_forward.swap(total_packed_path_to_reverse);
packed_leg_to_forward_begin.swap(packed_leg_to_reverse_begin);
}
}
if (new_total_weight_to_forward != INVALID_EDGE_WEIGHT)
{
BOOST_ASSERT(target_phantom.IsValidForwardTarget());
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
total_packed_path_to_forward.insert(total_packed_path_to_forward.end(),
packed_leg_to_forward.begin(),
packed_leg_to_forward.end());
search_from_forward_node = true;
}
else
{
total_packed_path_to_forward.clear();
packed_leg_to_forward_begin.clear();
search_from_forward_node = false;
}
if (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT)
{
BOOST_ASSERT(target_phantom.IsValidReverseTarget());
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
total_packed_path_to_reverse.insert(total_packed_path_to_reverse.end(),
packed_leg_to_reverse.begin(),
packed_leg_to_reverse.end());
search_from_reverse_node = true;
}
else
{
total_packed_path_to_reverse.clear();
packed_leg_to_reverse_begin.clear();
search_from_reverse_node = false;
}
prev_packed_leg_to_forward = std::move(packed_leg_to_forward);
prev_packed_leg_to_reverse = std::move(packed_leg_to_reverse);
total_weight_to_forward = new_total_weight_to_forward;
total_weight_to_reverse = new_total_weight_to_reverse;
++current_leg;
}
BOOST_ASSERT(total_weight_to_forward != INVALID_EDGE_WEIGHT ||
total_weight_to_reverse != INVALID_EDGE_WEIGHT);
// We make sure the fastest route is always in packed_legs_to_forward
if (total_weight_to_forward < total_weight_to_reverse ||
(total_weight_to_forward == total_weight_to_reverse &&
total_packed_path_to_forward.size() < total_packed_path_to_reverse.size()))
{
// insert sentinel
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
BOOST_ASSERT(packed_leg_to_forward_begin.size() == phantom_nodes_vector.size() + 1);
unpackLegs(facade,
phantom_nodes_vector,
total_packed_path_to_forward,
packed_leg_to_forward_begin,
total_weight_to_forward,
raw_route_data);
}
else
{
// insert sentinel
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
BOOST_ASSERT(packed_leg_to_reverse_begin.size() == phantom_nodes_vector.size() + 1);
unpackLegs(facade,
phantom_nodes_vector,
total_packed_path_to_reverse,
packed_leg_to_reverse_begin,
total_weight_to_reverse,
raw_route_data);
}
return raw_route_data;
}
template InternalRouteResult
shortestPathSearch(SearchEngineData<ch::Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<ch::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
template InternalRouteResult
shortestPathSearch(SearchEngineData<corech::Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<corech::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
template InternalRouteResult
shortestPathSearch(SearchEngineData<mld::Algorithm> &engine_working_data,
const datafacade::ContiguousInternalMemoryDataFacade<mld::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
} // namespace routing_algorithms
} // namespace engine
} // namespace osrm
idx: 1 | id: 21,672 | review: This can happen because we could get `PhantomNodes` that are not admissible as source/target right? | project: Project-OSRM-osrm-backend | language: cpp
@@ -19,6 +19,9 @@ func init() {
if !structsEqual(&tls.ClientSessionState{}, &qtls.ClientSessionState{}) {
panic("qtls.ClientSessionState not compatible with tls.ClientSessionState")
}
+ if !structsEqual(&tls.ClientSessionState{}, &clientSessionState{}) {
+ panic("clientSessionState not compatible with tls.ClientSessionState")
+ }
}
func structsEqual(a, b interface{}) bool { | 1 | package handshake
// This package uses unsafe to convert between:
// * qtls.ConnectionState and tls.ConnectionState
// * qtls.ClientSessionState and tls.ClientSessionState
// We check in init() that this conversion actually is safe.
import (
"crypto/tls"
"reflect"
"github.com/marten-seemann/qtls"
)
func init() {
if !structsEqual(&tls.ConnectionState{}, &qtls.ConnectionState{}) {
panic("qtls.ConnectionState not compatible with tls.ConnectionState")
}
if !structsEqual(&tls.ClientSessionState{}, &qtls.ClientSessionState{}) {
panic("qtls.ClientSessionState not compatible with tls.ClientSessionState")
}
}
func structsEqual(a, b interface{}) bool {
sa := reflect.ValueOf(a).Elem()
sb := reflect.ValueOf(b).Elem()
if sa.NumField() != sb.NumField() {
return false
}
for i := 0; i < sa.NumField(); i++ {
fa := sa.Type().Field(i)
fb := sb.Type().Field(i)
if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) {
return false
}
}
return true
}
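// Illustrative sketch (not part of the original file): once structsEqual has
// verified that two struct types share the exact same field layout, a value
// can be converted between them with a plain pointer cast via the unsafe
// package, e.g.:
//
//	func toTLSSessionState(s *qtls.ClientSessionState) *tls.ClientSessionState {
//		return (*tls.ClientSessionState)(unsafe.Pointer(s)) // would require importing "unsafe"
//	}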
| 1 | 8,413 | Should we add a note here to file a bug if this ever happens? | lucas-clemente-quic-go | go |
@@ -455,7 +455,14 @@ func (api *BasicMarketAPI) GetDealChangeRequestInfo(ctx context.Context, dealID
}
func (api *BasicMarketAPI) GetNumBenchmarks(ctx context.Context) (int, error) {
- return NumCurrentBenchmarks, nil
+ num, err := api.marketContract.GetBenchmarksQuantity(getCallOptions(ctx))
+ if err != nil {
+ return 0, err
+ }
+ if !num.IsInt64() {
+ return 0, errors.New("benchmarks quantity overflows int64")
+ }
+ return int(num.Int64()), nil
}
type ProfileRegistry struct { | 1 | package blockchain
import (
"context"
"crypto/ecdsa"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/noxiouz/zapctx/ctxlog"
"github.com/pkg/errors"
"github.com/sonm-io/core/blockchain/market"
marketAPI "github.com/sonm-io/core/blockchain/market/api"
pb "github.com/sonm-io/core/proto"
"go.uber.org/zap"
)
const (
NumCurrentBenchmarks = 12
)
type API interface {
ProfileRegistry() ProfileRegistryAPI
Events() EventsAPI
Market() MarketAPI
Blacklist() BlacklistAPI
LiveToken() TokenAPI
SideToken() TokenAPI
TestToken() TestTokenAPI
}
type ProfileRegistryAPI interface {
GetValidator(ctx context.Context, validatorID common.Address) (*pb.Validator, error)
GetCertificate(ctx context.Context, certificateID *big.Int) (*pb.Certificate, error)
}
type EventsAPI interface {
GetEvents(ctx context.Context, fromBlockInitial *big.Int) (chan *Event, error)
}
type MarketAPI interface {
	OpenDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int) <-chan DealOrError
CloseDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool) <-chan error
GetDealInfo(ctx context.Context, dealID *big.Int) (*pb.Deal, error)
GetDealsAmount(ctx context.Context) (*big.Int, error)
PlaceOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order) <-chan OrderOrError
CancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int) <-chan error
GetOrderInfo(ctx context.Context, orderID *big.Int) (*pb.Order, error)
GetOrdersAmount(ctx context.Context) (*big.Int, error)
Bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int) <-chan error
RegisterWorker(ctx context.Context, key *ecdsa.PrivateKey, master common.Address) (*types.Transaction, error)
ConfirmWorker(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) (*types.Transaction, error)
RemoveWorker(ctx context.Context, key *ecdsa.PrivateKey, master, slave common.Address) (*types.Transaction, error)
GetMaster(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) (common.Address, error)
GetDealChangeRequestInfo(ctx context.Context, dealID *big.Int) (*pb.DealChangeRequest, error)
GetNumBenchmarks(ctx context.Context) (int, error)
}
type BlacklistAPI interface {
Check(ctx context.Context, who, whom common.Address) (bool, error)
Add(ctx context.Context, key *ecdsa.PrivateKey, who, whom common.Address) (*types.Transaction, error)
Remove(ctx context.Context, key *ecdsa.PrivateKey, whom common.Address) (*types.Transaction, error)
AddMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error)
RemoveMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error)
SetMarketAddress(ctx context.Context, key *ecdsa.PrivateKey, market common.Address) (*types.Transaction, error)
}
// TokenAPI is a Go implementation of a high-level interface to an
// ERC20-compatible token with full functionality. The standard is described
// at: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
type TokenAPI interface {
	// Approve adds an allowance so that another account or contract may spend the caller's tokens
Approve(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error)
	// Transfer sends tokens from the caller to the given address
Transfer(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error)
	// TransferFrom lets a contract transfer tokens from an account that granted it an allowance
TransferFrom(ctx context.Context, key *ecdsa.PrivateKey, from string, to string, amount *big.Int) (*types.Transaction, error)
// BalanceOf returns balance of given address
BalanceOf(ctx context.Context, address string) (*big.Int, error)
	// AllowanceOf returns the allowance granted by the owner address to the spender account
AllowanceOf(ctx context.Context, from string, to string) (*big.Int, error)
	// TotalSupply returns the total amount of emitted tokens
TotalSupply(ctx context.Context) (*big.Int, error)
}
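// Illustrative usage sketch (hypothetical, not part of this file): with an
// initialized API instance, a balance query goes through a token accessor:
//
//	balance, err := api.LiveToken().BalanceOf(ctx, someHexAddress)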
type TestTokenAPI interface {
	// GetTokens sends 100 SNMT tokens to the message caller.
	// This function was added for MVP purposes and will be deleted later.
GetTokens(ctx context.Context, key *ecdsa.PrivateKey) (*types.Transaction, error)
}
type BasicAPI struct {
market MarketAPI
liveToken TokenAPI
sideToken TokenAPI
testToken TestTokenAPI
blacklist BlacklistAPI
profileRegistry ProfileRegistryAPI
events EventsAPI
}
func NewAPI(opts ...Option) (API, error) {
defaults := defaultOptions()
for _, o := range opts {
o(defaults)
}
client, err := initEthClient(defaults.apiEndpoint)
if err != nil {
return nil, err
}
liveToken, err := NewStandardToken(client, market.SNMAddr(), defaults.gasPrice)
if err != nil {
return nil, err
}
testToken, err := NewTestToken(client, market.SNMAddr(), defaults.gasPrice)
if err != nil {
return nil, err
}
clientSidechain, err := initEthClient(defaults.apiSidechainEndpoint)
if err != nil {
return nil, err
}
blacklist, err := NewBasicBlacklist(clientSidechain, market.BlacklistAddr(), defaults.gasPriceSidechain)
if err != nil {
return nil, err
}
marketApi, err := NewBasicMarket(clientSidechain, market.MarketAddr(), defaults.gasPriceSidechain, defaults.logParsePeriod)
if err != nil {
return nil, err
}
profileRegistry, err := NewProfileRegistry(clientSidechain, market.ProfileRegistryAddr(), defaults.gasPriceSidechain)
if err != nil {
return nil, err
}
sideToken, err := NewStandardToken(clientSidechain, market.SNMSidechainAddr(), defaults.gasPriceSidechain)
if err != nil {
return nil, err
}
events, err := NewEventsAPI(clientSidechain, ctxlog.GetLogger(context.Background()))
if err != nil {
return nil, err
}
return &BasicAPI{
market: marketApi,
blacklist: blacklist,
profileRegistry: profileRegistry,
liveToken: liveToken,
sideToken: sideToken,
testToken: testToken,
events: events,
}, nil
}
func (api *BasicAPI) Market() MarketAPI {
return api.market
}
func (api *BasicAPI) LiveToken() TokenAPI {
return api.liveToken
}
func (api *BasicAPI) SideToken() TokenAPI {
return api.sideToken
}
func (api *BasicAPI) TestToken() TestTokenAPI {
return api.testToken
}
func (api *BasicAPI) Blacklist() BlacklistAPI {
return api.blacklist
}
func (api *BasicAPI) ProfileRegistry() ProfileRegistryAPI {
return api.profileRegistry
}
func (api *BasicAPI) Events() EventsAPI {
return api.events
}
type BasicMarketAPI struct {
client *ethclient.Client
marketContract *marketAPI.Market
gasPrice int64
logParsePeriod time.Duration
}
func NewBasicMarket(client *ethclient.Client, address common.Address, gasPrice int64, logParsePeriod time.Duration) (MarketAPI, error) {
marketContract, err := marketAPI.NewMarket(address, client)
if err != nil {
return nil, err
}
return &BasicMarketAPI{
client: client,
marketContract: marketContract,
gasPrice: gasPrice,
logParsePeriod: logParsePeriod,
}, nil
}
func (api *BasicMarketAPI) OpenDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int) <-chan DealOrError {
ch := make(chan DealOrError, 0)
go api.openDeal(ctx, key, askID, bidID, ch)
return ch
}
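// openDeal runs in its own goroutine: it submits the transaction, waits for
// the DealOpened event to appear in the receipt logs, resolves the full deal
// info and sends exactly one DealOrError on ch.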
func (api *BasicMarketAPI) openDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int, ch chan DealOrError) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
tx, err := api.marketContract.OpenDeal(opts, askID, bidID)
if err != nil {
ch <- DealOrError{nil, err}
return
}
id, err := waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.DealOpenedTopic)
if err != nil {
ch <- DealOrError{nil, err}
return
}
deal, err := api.GetDealInfo(ctx, id)
ch <- DealOrError{deal, err}
}
func (api *BasicMarketAPI) CloseDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool) <-chan error {
ch := make(chan error, 0)
go api.closeDeal(ctx, key, dealID, blacklisted, ch)
return ch
}
func (api *BasicMarketAPI) closeDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool, ch chan error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
tx, err := api.marketContract.CloseDeal(opts, dealID, blacklisted)
if err != nil {
ch <- err
return
}
_, err = waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.DealUpdatedTopic)
if err != nil {
ch <- err
return
}
ch <- nil
}
func (api *BasicMarketAPI) GetDealInfo(ctx context.Context, dealID *big.Int) (*pb.Deal, error) {
deal1, err := api.marketContract.GetDealInfo(getCallOptions(ctx), dealID)
if err != nil {
return nil, err
}
deal2, err := api.marketContract.GetDealParams(getCallOptions(ctx), dealID)
if err != nil {
return nil, err
}
benchmarks, err := pb.NewBenchmarks(deal1.Benchmarks)
if err != nil {
return nil, err
}
return &pb.Deal{
Id: pb.NewBigInt(dealID),
Benchmarks: benchmarks,
SupplierID: pb.NewEthAddress(deal1.SupplierID),
ConsumerID: pb.NewEthAddress(deal1.ConsumerID),
MasterID: pb.NewEthAddress(deal1.MasterID),
AskID: pb.NewBigInt(deal1.AskID),
BidID: pb.NewBigInt(deal1.BidID),
Duration: deal2.Duration.Uint64(),
Price: pb.NewBigInt(deal2.Price),
StartTime: &pb.Timestamp{Seconds: deal1.StartTime.Int64()},
EndTime: &pb.Timestamp{Seconds: deal2.EndTime.Int64()},
Status: pb.DealStatus(deal2.Status),
BlockedBalance: pb.NewBigInt(deal2.BlockedBalance),
TotalPayout: pb.NewBigInt(deal2.TotalPayout),
LastBillTS: &pb.Timestamp{Seconds: deal2.LastBillTS.Int64()},
}, nil
}
func (api *BasicMarketAPI) GetDealsAmount(ctx context.Context) (*big.Int, error) {
return api.marketContract.GetDealsAmount(getCallOptions(ctx))
}
func (api *BasicMarketAPI) PlaceOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order) <-chan OrderOrError {
ch := make(chan OrderOrError, 0)
go api.placeOrder(ctx, key, order, ch)
return ch
}
func (api *BasicMarketAPI) placeOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order, ch chan OrderOrError) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
fixedNetflags := pb.UintToNetflags(order.Netflags)
var fixedTag [32]byte
copy(fixedTag[:], order.Tag[:])
tx, err := api.marketContract.PlaceOrder(opts,
uint8(order.OrderType),
order.CounterpartyID.Unwrap(),
big.NewInt(int64(order.Duration)),
order.Price.Unwrap(),
fixedNetflags,
uint8(order.IdentityLevel),
common.HexToAddress(order.Blacklist),
fixedTag,
order.GetBenchmarks().ToArray(),
)
if err != nil {
ch <- OrderOrError{nil, err}
return
}
id, err := waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.OrderPlacedTopic)
if err != nil {
ch <- OrderOrError{nil, err}
return
}
orderInfo, err := api.GetOrderInfo(ctx, id)
ch <- OrderOrError{orderInfo, err}
}
func (api *BasicMarketAPI) CancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int) <-chan error {
ch := make(chan error, 0)
go api.cancelOrder(ctx, key, id, ch)
return ch
}
func (api *BasicMarketAPI) cancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int, ch chan error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
tx, err := api.marketContract.CancelOrder(opts, id)
if err != nil {
ch <- err
return
}
if _, err := waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.OrderUpdatedTopic); err != nil {
ch <- err
return
}
ch <- nil
}
func (api *BasicMarketAPI) GetOrderInfo(ctx context.Context, orderID *big.Int) (*pb.Order, error) {
order1, err := api.marketContract.GetOrderInfo(getCallOptions(ctx), orderID)
if err != nil {
return nil, err
}
order2, err := api.marketContract.GetOrderParams(getCallOptions(ctx), orderID)
if err != nil {
return nil, err
}
netflags := pb.NetflagsToUint(order1.Netflags)
benchmarks, err := pb.NewBenchmarks(order1.Benchmarks)
if err != nil {
return nil, err
}
return &pb.Order{
Id: pb.NewBigInt(orderID),
DealID: pb.NewBigInt(order2.DealID),
OrderType: pb.OrderType(order1.OrderType),
OrderStatus: pb.OrderStatus(order2.OrderStatus),
AuthorID: pb.NewEthAddress(order1.Author),
CounterpartyID: pb.NewEthAddress(order1.Counterparty),
Duration: order1.Duration.Uint64(),
Price: pb.NewBigInt(order1.Price),
Netflags: netflags,
IdentityLevel: pb.IdentityLevel(order1.IdentityLevel),
Blacklist: order1.Blacklist.String(),
Tag: order1.Tag[:],
Benchmarks: benchmarks,
FrozenSum: pb.NewBigInt(order1.FrozenSum),
}, nil
}
func (api *BasicMarketAPI) GetOrdersAmount(ctx context.Context) (*big.Int, error) {
return api.marketContract.GetOrdersAmount(getCallOptions(ctx))
}
func (api *BasicMarketAPI) Bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int) <-chan error {
ch := make(chan error, 0)
go api.bill(ctx, key, dealID, ch)
return ch
}
func (api *BasicMarketAPI) bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, ch chan error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
tx, err := api.marketContract.Bill(opts, dealID)
if err != nil {
ch <- err
return
}
if _, err := waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.BilledTopic); err != nil {
ch <- err
return
}
ch <- nil
}
func (api *BasicMarketAPI) RegisterWorker(ctx context.Context, key *ecdsa.PrivateKey, master common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.marketContract.RegisterWorker(opts, master)
}
func (api *BasicMarketAPI) ConfirmWorker(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.marketContract.ConfirmWorker(opts, slave)
}
func (api *BasicMarketAPI) RemoveWorker(ctx context.Context, key *ecdsa.PrivateKey, master, slave common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.marketContract.RemoveWorker(opts, master, slave)
}
func (api *BasicMarketAPI) GetMaster(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) (common.Address, error) {
return api.marketContract.GetMaster(getCallOptions(ctx), slave)
}
func (api *BasicMarketAPI) GetDealChangeRequestInfo(ctx context.Context, dealID *big.Int) (*pb.DealChangeRequest, error) {
changeRequest, err := api.marketContract.GetChangeRequestInfo(getCallOptions(ctx), dealID)
if err != nil {
return nil, err
}
return &pb.DealChangeRequest{
DealID: pb.NewBigInt(changeRequest.DealID),
RequestType: pb.OrderType(changeRequest.RequestType),
Duration: changeRequest.Duration.Uint64(),
Price: pb.NewBigInt(changeRequest.Price),
Status: pb.ChangeRequestStatus(changeRequest.Status),
}, nil
}
func (api *BasicMarketAPI) GetNumBenchmarks(ctx context.Context) (int, error) {
return NumCurrentBenchmarks, nil
}
type ProfileRegistry struct {
client *ethclient.Client
profileRegistryContract *marketAPI.ProfileRegistry
gasPrice int64
}
func NewProfileRegistry(client *ethclient.Client, address common.Address, gasPrice int64) (ProfileRegistryAPI, error) {
profileRegistryContract, err := marketAPI.NewProfileRegistry(address, client)
if err != nil {
return nil, err
}
return &ProfileRegistry{
client: client,
profileRegistryContract: profileRegistryContract,
gasPrice: gasPrice,
}, nil
}
func (api *ProfileRegistry) GetValidator(ctx context.Context, validatorID common.Address) (*pb.Validator, error) {
level, err := api.profileRegistryContract.GetValidatorLevel(getCallOptions(ctx), validatorID)
if err != nil {
return nil, err
}
return &pb.Validator{
Id: pb.NewEthAddress(validatorID),
Level: uint64(level),
}, nil
}
func (api *ProfileRegistry) GetCertificate(ctx context.Context, certificateID *big.Int) (*pb.Certificate, error) {
validatorID, ownerID, attribute, value, err := api.profileRegistryContract.GetCertificate(getCallOptions(ctx), certificateID)
if err != nil {
return nil, err
}
return &pb.Certificate{
ValidatorID: pb.NewEthAddress(validatorID),
OwnerID: pb.NewEthAddress(ownerID),
Attribute: attribute.Uint64(),
Value: value,
}, nil
}
type BasicBlacklistAPI struct {
client *ethclient.Client
blacklistContract *marketAPI.Blacklist
gasPrice int64
}
func NewBasicBlacklist(client *ethclient.Client, address common.Address, gasPrice int64) (BlacklistAPI, error) {
blacklistContract, err := marketAPI.NewBlacklist(address, client)
if err != nil {
return nil, err
}
return &BasicBlacklistAPI{
client: client,
blacklistContract: blacklistContract,
gasPrice: gasPrice,
}, nil
}
func (api *BasicBlacklistAPI) Check(ctx context.Context, who, whom common.Address) (bool, error) {
return api.blacklistContract.Check(getCallOptions(ctx), who, whom)
}
func (api *BasicBlacklistAPI) Add(ctx context.Context, key *ecdsa.PrivateKey, who, whom common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.blacklistContract.Add(opts, who, whom)
}
func (api *BasicBlacklistAPI) Remove(ctx context.Context, key *ecdsa.PrivateKey, whom common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.blacklistContract.Remove(opts, whom)
}
func (api *BasicBlacklistAPI) AddMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.blacklistContract.AddMaster(opts, root)
}
func (api *BasicBlacklistAPI) RemoveMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.blacklistContract.RemoveMaster(opts, root)
}
func (api *BasicBlacklistAPI) SetMarketAddress(ctx context.Context, key *ecdsa.PrivateKey, market common.Address) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
return api.blacklistContract.SetMarketAddress(opts, market)
}
type StandardTokenApi struct {
client *ethclient.Client
tokenContract *marketAPI.StandardToken
gasPrice int64
}
func NewStandardToken(client *ethclient.Client, address common.Address, gasPrice int64) (TokenAPI, error) {
tokenContract, err := marketAPI.NewStandardToken(address, client)
if err != nil {
return nil, err
}
return &StandardTokenApi{
client: client,
tokenContract: tokenContract,
gasPrice: gasPrice,
}, nil
}
func (api *StandardTokenApi) BalanceOf(ctx context.Context, address string) (*big.Int, error) {
return api.tokenContract.BalanceOf(getCallOptions(ctx), common.HexToAddress(address))
}
func (api *StandardTokenApi) AllowanceOf(ctx context.Context, from string, to string) (*big.Int, error) {
return api.tokenContract.Allowance(getCallOptions(ctx), common.HexToAddress(from), common.HexToAddress(to))
}
func (api *StandardTokenApi) Approve(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
return api.tokenContract.Approve(opts, common.HexToAddress(to), amount)
}
func (api *StandardTokenApi) Transfer(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
return api.tokenContract.Transfer(opts, common.HexToAddress(to), amount)
}
func (api *StandardTokenApi) TransferFrom(ctx context.Context, key *ecdsa.PrivateKey, from string, to string, amount *big.Int) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
return api.tokenContract.TransferFrom(opts, common.HexToAddress(from), common.HexToAddress(to), amount)
}
func (api *StandardTokenApi) TotalSupply(ctx context.Context) (*big.Int, error) {
return api.tokenContract.TotalSupply(getCallOptions(ctx))
}
type TestTokenApi struct {
client *ethclient.Client
tokenContract *marketAPI.SNMTToken
gasPrice int64
}
func NewTestToken(client *ethclient.Client, address common.Address, gasPrice int64) (TestTokenAPI, error) {
tokenContract, err := marketAPI.NewSNMTToken(address, client)
if err != nil {
return nil, err
}
return &TestTokenApi{
client: client,
tokenContract: tokenContract,
gasPrice: gasPrice,
}, nil
}
func (api *TestTokenApi) GetTokens(ctx context.Context, key *ecdsa.PrivateKey) (*types.Transaction, error) {
opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
return api.tokenContract.GetTokens(opts)
}
type BasicEventsAPI struct {
client *ethclient.Client
logger *zap.Logger
marketABI abi.ABI
profilesABI abi.ABI
}
func NewEventsAPI(client *ethclient.Client, logger *zap.Logger) (EventsAPI, error) {
marketABI, err := abi.JSON(strings.NewReader(marketAPI.MarketABI))
if err != nil {
return nil, err
}
profilesABI, err := abi.JSON(strings.NewReader(marketAPI.ProfileRegistryABI))
if err != nil {
return nil, err
}
return &BasicEventsAPI{
client: client,
logger: logger,
marketABI: marketABI,
profilesABI: profilesABI,
}, nil
}
func (api *BasicEventsAPI) GetEvents(ctx context.Context, fromBlockInitial *big.Int) (chan *Event, error) {
var (
topics [][]common.Hash
eventTopic = []common.Hash{
market.DealOpenedTopic,
market.DealUpdatedTopic,
market.OrderPlacedTopic,
market.OrderUpdatedTopic,
market.DealChangeRequestSentTopic,
market.DealChangeRequestUpdatedTopic,
market.BilledTopic,
market.WorkerAnnouncedTopic,
			market.WorkerConfirmedTopic,
market.WorkerRemovedTopic,
market.AddedToBlacklistTopic,
market.RemovedFromBlacklistTopic,
market.ValidatorCreatedTopic,
market.ValidatorDeletedTopic,
market.CertificateCreatedTopic,
}
out = make(chan *Event, 128)
)
topics = append(topics, eventTopic)
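	// The goroutine below polls FilterLogs once per second starting from
	// fromBlockInitial, skips logs of the last block already processed,
	// stamps each event with its block timestamp and forwards it to the out
	// channel; filter errors are forwarded as ErrorData events.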
go func() {
var (
lastLogBlockNumber = fromBlockInitial.Uint64()
fromBlock = fromBlockInitial.Uint64()
tk = time.NewTicker(time.Second)
)
for {
select {
case <-ctx.Done():
return
case <-tk.C:
logs, err := api.client.FilterLogs(ctx, ethereum.FilterQuery{
Topics: topics,
FromBlock: big.NewInt(0).SetUint64(fromBlock),
Addresses: []common.Address{
market.MarketAddr(),
market.BlacklistAddr(),
market.ProfileRegistryAddr(),
},
})
if err != nil {
out <- &Event{
Data: &ErrorData{Err: errors.Wrap(err, "failed to FilterLogs")},
BlockNumber: fromBlock,
}
}
numLogs := len(logs)
if numLogs < 1 {
api.logger.Info("no logs, skipping")
continue
}
var eventTS uint64
for _, log := range logs {
// Skip logs from the last seen block.
if log.BlockNumber == fromBlock {
continue
}
// Update eventTS if we've got a new block.
if lastLogBlockNumber != log.BlockNumber {
lastLogBlockNumber = log.BlockNumber
block, err := api.client.BlockByNumber(ctx, big.NewInt(0).SetUint64(lastLogBlockNumber))
if err != nil {
api.logger.Warn("failed to get event timestamp", zap.Error(err),
zap.Uint64("blockNumber", lastLogBlockNumber))
} else {
eventTS = block.Time().Uint64()
}
}
api.processLog(log, eventTS, out)
}
fromBlock = logs[numLogs-1].BlockNumber
}
}
}()
return out, nil
}
func (api *BasicEventsAPI) processLog(log types.Log, eventTS uint64, out chan *Event) {
// This should never happen, but it's ethereum, and things might happen.
if len(log.Topics) < 1 {
out <- &Event{
Data: &ErrorData{Err: errors.New("malformed log entry"), Topic: "unknown"},
BlockNumber: log.BlockNumber,
}
return
}
sendErr := func(out chan *Event, err error, topic common.Hash) {
out <- &Event{Data: &ErrorData{Err: err, Topic: topic.String()}, BlockNumber: log.BlockNumber, TS: eventTS}
}
sendData := func(data interface{}) {
out <- &Event{Data: data, BlockNumber: log.BlockNumber, TS: eventTS}
}
var topic = log.Topics[0]
switch topic {
case market.DealOpenedTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&DealOpenedData{ID: id})
case market.DealUpdatedTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&DealUpdatedData{ID: id})
case market.DealChangeRequestSentTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&DealChangeRequestSentData{ID: id})
case market.DealChangeRequestUpdatedTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&DealChangeRequestUpdatedData{ID: id})
case market.BilledTopic:
var billedData = &BilledData{}
if err := api.marketABI.Unpack(billedData, "Billed", log.Data); err != nil {
sendErr(out, err, topic)
return
}
sendData(billedData)
case market.OrderPlacedTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&OrderPlacedData{ID: id})
case market.OrderUpdatedTopic:
id, err := extractBig(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&OrderUpdatedData{ID: id})
case market.WorkerAnnouncedTopic:
slaveID, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
masterID, err := extractAddress(log, 2)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&WorkerAnnouncedData{SlaveID: slaveID, MasterID: masterID})
case market.WorkerConfirmedTopic:
slaveID, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
masterID, err := extractAddress(log, 2)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&WorkerConfirmedData{SlaveID: slaveID, MasterID: masterID})
case market.WorkerRemovedTopic:
slaveID, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
masterID, err := extractAddress(log, 2)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&WorkerRemovedData{SlaveID: slaveID, MasterID: masterID})
case market.AddedToBlacklistTopic:
adderID, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
addeeID, err := extractAddress(log, 2)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&AddedToBlacklistData{AdderID: adderID, AddeeID: addeeID})
case market.RemovedFromBlacklistTopic:
removerID, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
removeeID, err := extractAddress(log, 2)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&RemovedFromBlacklistData{RemoverID: removerID, RemoveeID: removeeID})
case market.ValidatorCreatedTopic:
id, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&ValidatorCreatedData{ID: id})
case market.ValidatorDeletedTopic:
id, err := extractAddress(log, 1)
if err != nil {
sendErr(out, err, topic)
return
}
sendData(&ValidatorDeletedData{ID: id})
case market.CertificateCreatedTopic:
var id = big.NewInt(0)
if err := api.profilesABI.Unpack(&id, "CertificateCreated", log.Data); err != nil {
sendErr(out, err, topic)
return
}
sendData(&CertificateCreatedData{ID: id})
default:
out <- &Event{
Data: &ErrorData{Err: errors.New("unknown topic"), Topic: topic.String()},
BlockNumber: log.BlockNumber,
}
}
}
| 1 | 6,903 | overflow is still possible, isn't it? | sonm-io-core | go |
@@ -37,6 +37,9 @@ RSpec.configure do |config|
config.use_transactional_fixtures = false
config.use_instantiated_fixtures = false
config.fixture_path = "#{::Rails.root}/spec/fixtures"
+ config.infer_base_class_for_anonymous_controllers = false
+ config.order = "random"
+ config.fail_fast = true
config.include Paperclip::Shoulda::Matchers
config.include EmailSpec::Helpers | 1 | require 'codeclimate-test-reporter'
CodeClimate::TestReporter.configure do |config|
config.logger.level = Logger::WARN
end
CodeClimate::TestReporter.start
if ENV["COVERAGE"]
require 'simplecov'
SimpleCov.start 'rails'
end
ENV["RAILS_ENV"] ||= 'test'
require File.expand_path("../../config/environment", __FILE__)
require 'rspec/autorun'
require 'rspec/rails'
require 'paperclip/matchers'
require 'email_spec'
require 'webmock/rspec'
require 'clearance/testing'
WebMock.disable_net_connect!(allow_localhost: true, allow: 'codeclimate.com')
Dir[File.expand_path(File.join(File.dirname(__FILE__),'support','**','*.rb'))].each {|f| require f}
FakeStripeRunner.boot
FakeGithubRunner.boot
Delayed::Worker.delay_jobs = false
Capybara.javascript_driver = :webkit
Capybara.configure do |config|
config.match = :prefer_exact
config.ignore_hidden_elements = false
end
RSpec.configure do |config|
config.use_transactional_fixtures = false
config.use_instantiated_fixtures = false
config.fixture_path = "#{::Rails.root}/spec/fixtures"
config.include Paperclip::Shoulda::Matchers
config.include EmailSpec::Helpers
config.include EmailSpec::Matchers
config.include FactoryGirl::Syntax::Methods
config.include Subscriptions
config.include PurchaseHelpers
config.include StripeHelpers
config.include SessionHelpers, type: :feature
config.include PaypalHelpers, type: :feature
config.mock_with :mocha
config.treat_symbols_as_metadata_keys_with_true_values = true
end
| 1 | 9,117 | This seems more like a personal preference, so maybe this should be in your `~/.rspec`? | thoughtbot-upcase | rb |
@@ -31,8 +31,8 @@ else:
THREAD_EXCEPTION = thread.error
if WINDOWS:
- from scapy.arch.pcapdnet import PcapTimeoutElapsed
- recv_error = PcapTimeoutElapsed
+ from scapy.error import Scapy_Exception
+ recv_error = Scapy_Exception
else:
recv_error = ()
| 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## Copyright (C) Gabriel Potter <gabriel@potter.fr>
## This program is published under a GPLv2 license
"""
Automata with states, transitions and actions.
"""
from __future__ import absolute_import
import types,itertools,time,os,sys,socket,traceback
from select import select
from collections import deque
import threading
from scapy.config import conf
from scapy.utils import do_graph
from scapy.error import log_interactive, warning
from scapy.plist import PacketList
from scapy.data import MTU
from scapy.supersocket import SuperSocket
from scapy.consts import WINDOWS
from scapy.compat import *
import scapy.modules.six as six
try:
import thread
except ImportError:
THREAD_EXCEPTION = RuntimeError
else:
THREAD_EXCEPTION = thread.error
if WINDOWS:
from scapy.arch.pcapdnet import PcapTimeoutElapsed
recv_error = PcapTimeoutElapsed
else:
recv_error = ()
""" In Windows, select.select is not available for custom objects. Here's the implementation of scapy to re-create this functionnality
# Passive way: using no-resource locks
+---------+ +---------------+ +-------------------------+
| Start +------------->Select_objects +----->+Linux: call select.select|
+---------+ |(select.select)| +-------------------------+
+-------+-------+
|
+----v----+ +--------+
| Windows | |Time Out+----------------------------------+
+----+----+ +----+---+ |
| ^ |
Event | | |
+ | | |
| +-------v-------+ | |
| +------+Selectable Sel.+-----+-----------------+-----------+ |
| | +-------+-------+ | | | v +-----v-----+
+-------v----------+ | | | | | Passive lock<-----+release_all<------+
|Data added to list| +----v-----+ +-----v-----+ +----v-----+ v v + +-----------+ |
+--------+---------+ |Selectable| |Selectable | |Selectable| ............ | |
| +----+-----+ +-----------+ +----------+ | |
| v | |
v +----+------+ +------------------+ +-------------v-------------------+ |
+-----+------+ |wait_return+-->+ check_recv: | | | |
|call_release| +----+------+ |If data is in list| | END state: selectable returned | +---+--------+
+-----+-------- v +-------+----------+ | | | exit door |
| else | +---------------------------------+ +---+--------+
| + | |
| +----v-------+ | |
+--------->free -->Passive lock| | |
+----+-------+ | |
| | |
| v |
+------------------Selectable-Selector-is-advertised-that-the-selectable-is-readable---------+
"""
class SelectableObject:
"""DEV: to implement one of those, you need to add 2 things to your object:
- add "check_recv" function
- call "self.call_release" once you are ready to be read
You can set the __selectable_force_select__ to True in the class, if you want to
    force the handler to use fileno(). This may only be usable on sockets created using
the builtin socket API."""
__selectable_force_select__ = False
def check_recv(self):
"""DEV: will be called only once (at beginning) to check if the object is ready."""
raise OSError("This method must be overwriten.")
def _wait_non_ressources(self, callback):
"""This get started as a thread, and waits for the data lock to be freed then advertise itself to the SelectableSelector using the callback"""
self.trigger = threading.Lock()
self.was_ended = False
self.trigger.acquire()
self.trigger.acquire()
if not self.was_ended:
callback(self)
def wait_return(self, callback):
"""Entry point of SelectableObject: register the callback"""
if self.check_recv():
return callback(self)
_t = threading.Thread(target=self._wait_non_ressources, args=(callback,))
_t.setDaemon(True)
_t.start()
def call_release(self, arborted=False):
"""DEV: Must be call when the object becomes ready to read.
Relesases the lock of _wait_non_ressources"""
self.was_ended = arborted
try:
self.trigger.release()
except (THREAD_EXCEPTION, AttributeError):
pass
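# Illustrative sketch (not used by scapy itself): the minimal contract a
# custom SelectableObject has to fulfill, as described in the class
# docstring above -- provide check_recv() and call call_release() once
# the object becomes readable.
class _ExampleSelectable(SelectableObject):
    def __init__(self):
        self.data = deque()
    def check_recv(self):
        # Readable as soon as something has been queued
        return len(self.data) > 0
    def push(self, obj):
        self.data.append(obj)
        self.call_release()  # advertise readability to the selector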
class SelectableSelector(object):
"""
Select SelectableObject objects.
inputs: objects to process
remain: timeout. If 0, return [].
"""
def _release_all(self):
"""Releases all locks to kill all threads"""
for i in self.inputs:
i.call_release(True)
self.available_lock.release()
def _timeout_thread(self, remain):
"""Timeout before releasing every thing, if nothing was returned"""
time.sleep(remain)
if not self._ended:
self._ended = True
self._release_all()
def _exit_door(self, _input):
"""This function is passed to each SelectableObject as a callback
        The SelectableObjects have to call it once they are ready"""
self.results.append(_input)
if self._ended:
return
self._ended = True
self._release_all()
def __init__(self, inputs, remain):
self.results = []
self.inputs = list(inputs)
self.remain = remain
self.available_lock = threading.Lock()
self.available_lock.acquire()
self._ended = False
def process(self):
"""Entry point of SelectableSelector"""
if WINDOWS:
select_inputs = []
for i in self.inputs:
if not isinstance(i, SelectableObject):
warning("Unknown ignored object type: %s", type(i))
elif i.__selectable_force_select__:
# Then use select.select
select_inputs.append(i)
elif not self.remain and i.check_recv():
self.results.append(i)
else:
i.wait_return(self._exit_door)
if select_inputs:
# Use default select function
self.results.extend(select(select_inputs, [], [], self.remain)[0])
if not self.remain:
return self.results
threading.Thread(target=self._timeout_thread, args=(self.remain,)).start()
if not self._ended:
self.available_lock.acquire()
return self.results
else:
r,_,_ = select(self.inputs,[],[],self.remain)
return r
def select_objects(inputs, remain):
"""
    Select SelectableObject objects. Same as:
    select.select(inputs, [], [], remain)
    It also works on Windows, but only on SelectableObject instances.
inputs: objects to process
remain: timeout. If 0, return [].
"""
handler = SelectableSelector(inputs, remain)
return handler.process()
class ObjectPipe(SelectableObject):
def __init__(self):
self.rd,self.wr = os.pipe()
self.queue = deque()
def fileno(self):
return self.rd
def check_recv(self):
return len(self.queue) > 0
def send(self, obj):
self.queue.append(obj)
os.write(self.wr,b"X")
self.call_release()
def write(self, obj):
self.send(obj)
def recv(self, n=0):
os.read(self.rd, 1)
return self.queue.popleft()
def read(self, n=0):
return self.recv(n)
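# Example usage (illustrative): an ObjectPipe carries arbitrary Python
# objects between threads and can be waited on with select_objects():
#
#     pipe = ObjectPipe()
#     pipe.send({"answer": 42})
#     assert select_objects([pipe], 0) == [pipe]
#     assert pipe.recv() == {"answer": 42}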
class Message:
def __init__(self, **args):
self.__dict__.update(args)
def __repr__(self):
return "<Message %s>" % " ".join("%s=%r"%(k,v)
for (k,v) in six.iteritems(self.__dict__)
if not k.startswith("_"))
class _instance_state:
def __init__(self, instance):
self.__self__ = instance.__self__
self.__func__ = instance.__func__
self.__self__.__class__ = instance.__self__.__class__
def __getattr__(self, attr):
return getattr(self.__func__, attr)
def __call__(self, *args, **kargs):
return self.__func__(self.__self__, *args, **kargs)
def breaks(self):
return self.__self__.add_breakpoints(self.__func__)
def intercepts(self):
return self.__self__.add_interception_points(self.__func__)
def unbreaks(self):
return self.__self__.remove_breakpoints(self.__func__)
def unintercepts(self):
return self.__self__.remove_interception_points(self.__func__)
##############
## Automata ##
##############
class ATMT:
STATE = "State"
ACTION = "Action"
CONDITION = "Condition"
RECV = "Receive condition"
TIMEOUT = "Timeout condition"
IOEVENT = "I/O event"
class NewStateRequested(Exception):
def __init__(self, state_func, automaton, *args, **kargs):
self.func = state_func
self.state = state_func.atmt_state
self.initial = state_func.atmt_initial
self.error = state_func.atmt_error
self.final = state_func.atmt_final
Exception.__init__(self, "Request state [%s]" % self.state)
self.automaton = automaton
self.args = args
self.kargs = kargs
self.action_parameters() # init action parameters
def action_parameters(self, *args, **kargs):
self.action_args = args
self.action_kargs = kargs
return self
def run(self):
return self.func(self.automaton, *self.args, **self.kargs)
def __repr__(self):
return "NewStateRequested(%s)" % self.state
@staticmethod
def state(initial=0,final=0,error=0):
def deco(f,initial=initial, final=final):
f.atmt_type = ATMT.STATE
f.atmt_state = f.__name__
f.atmt_initial = initial
f.atmt_final = final
f.atmt_error = error
def state_wrapper(self, *args, **kargs):
return ATMT.NewStateRequested(f, self, *args, **kargs)
state_wrapper.__name__ = "%s_wrapper" % f.__name__
state_wrapper.atmt_type = ATMT.STATE
state_wrapper.atmt_state = f.__name__
state_wrapper.atmt_initial = initial
state_wrapper.atmt_final = final
state_wrapper.atmt_error = error
state_wrapper.atmt_origfunc = f
return state_wrapper
return deco
@staticmethod
def action(cond, prio=0):
def deco(f,cond=cond):
if not hasattr(f,"atmt_type"):
f.atmt_cond = {}
f.atmt_type = ATMT.ACTION
f.atmt_cond[cond.atmt_condname] = prio
return f
return deco
@staticmethod
def condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.CONDITION
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def receive_condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.RECV
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def ioevent(state, name, prio=0, as_supersocket=None):
def deco(f, state=state):
f.atmt_type = ATMT.IOEVENT
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_ioname = name
f.atmt_prio = prio
f.atmt_as_supersocket = as_supersocket
return f
return deco
@staticmethod
def timeout(state, timeout):
def deco(f, state=state, timeout=timeout):
f.atmt_type = ATMT.TIMEOUT
f.atmt_state = state.atmt_state
f.atmt_timeout = timeout
f.atmt_condname = f.__name__
return f
return deco
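# Illustrative sketch (hypothetical, not part of scapy): a minimal two-state
# automaton built with the decorators above -- see the Automaton class below
# for the machinery that runs it.
#
#     class HelloWorld(Automaton):
#         @ATMT.state(initial=1)
#         def BEGIN(self):
#             raise self.END()
#         @ATMT.state(final=1)
#         def END(self):
#             return "done"
#
#     # HelloWorld().run() would return "done"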
class _ATMT_Command:
RUN = "RUN"
NEXT = "NEXT"
FREEZE = "FREEZE"
STOP = "STOP"
END = "END"
EXCEPTION = "EXCEPTION"
SINGLESTEP = "SINGLESTEP"
BREAKPOINT = "BREAKPOINT"
INTERCEPT = "INTERCEPT"
ACCEPT = "ACCEPT"
REPLACE = "REPLACE"
REJECT = "REJECT"
class _ATMT_supersocket(SuperSocket):
def __init__(self, name, ioevent, automaton, proto, args, kargs):
self.name = name
self.ioevent = ioevent
self.proto = proto
self.spa,self.spb = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
kargs["external_fd"] = {ioevent:self.spb}
self.atmt = automaton(*args, **kargs)
self.atmt.runbg()
def fileno(self):
return self.spa.fileno()
def send(self, s):
if not isinstance(s, bytes):
s = bytes(s)
return self.spa.send(s)
def recv(self, n=MTU):
try:
r = self.spa.recv(n)
except recv_error:
if not WINDOWS:
raise
if self.proto is not None:
r = self.proto(r)
return r
def close(self):
pass
class _ATMT_to_supersocket:
def __init__(self, name, ioevent, automaton):
self.name = name
self.ioevent = ioevent
self.automaton = automaton
def __call__(self, proto, *args, **kargs):
return _ATMT_supersocket(self.name, self.ioevent, self.automaton, proto, args, kargs)
class Automaton_metaclass(type):
def __new__(cls, name, bases, dct):
cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
cls.states={}
cls.state = None
cls.recv_conditions={}
cls.conditions={}
cls.ioevents={}
cls.timeout={}
cls.actions={}
cls.initial_states=[]
cls.ionames = []
cls.iosupersockets = []
members = {}
classes = [cls]
while classes:
c = classes.pop(0) # order is important to avoid breaking method overloading
classes += list(c.__bases__)
for k,v in six.iteritems(c.__dict__):
if k not in members:
members[k] = v
decorated = [v for v in six.itervalues(members)
if isinstance(v, types.FunctionType) and hasattr(v, "atmt_type")]
for m in decorated:
if m.atmt_type == ATMT.STATE:
s = m.atmt_state
cls.states[s] = m
cls.recv_conditions[s]=[]
cls.ioevents[s]=[]
cls.conditions[s]=[]
cls.timeout[s]=[]
if m.atmt_initial:
cls.initial_states.append(m)
elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]:
cls.actions[m.atmt_condname] = []
for m in decorated:
if m.atmt_type == ATMT.CONDITION:
cls.conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.RECV:
cls.recv_conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.IOEVENT:
cls.ioevents[m.atmt_state].append(m)
cls.ionames.append(m.atmt_ioname)
if m.atmt_as_supersocket is not None:
cls.iosupersockets.append(m)
elif m.atmt_type == ATMT.TIMEOUT:
cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
elif m.atmt_type == ATMT.ACTION:
for c in m.atmt_cond:
cls.actions[c].append(m)
for v in six.itervalues(cls.timeout):
v.sort(key=cmp_to_key(lambda t1_f1,t2_f2: cmp(t1_f1[0],t2_f2[0])))
v.append((None, None))
for v in itertools.chain(six.itervalues(cls.conditions),
six.itervalues(cls.recv_conditions),
six.itervalues(cls.ioevents)):
v.sort(key=cmp_to_key(lambda c1,c2: cmp(c1.atmt_prio,c2.atmt_prio)))
for condname,actlst in six.iteritems(cls.actions):
actlst.sort(key=cmp_to_key(lambda c1,c2: cmp(c1.atmt_cond[condname], c2.atmt_cond[condname])))
for ioev in cls.iosupersockets:
setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls))
return cls
def graph(self, **kargs):
s = 'digraph "%s" {\n' % self.__class__.__name__
se = "" # Keep initial nodes at the begining for better rendering
for st in six.itervalues(self.states):
if st.atmt_initial:
se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state)+se
elif st.atmt_final:
se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state
elif st.atmt_error:
se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state
s += se
for st in six.itervalues(self.states):
for n in st.atmt_origfunc.__code__.co_names+st.atmt_origfunc.__code__.co_consts:
if n in self.states:
s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state,n)
for c,k,v in ([("purple",k,v) for k,v in self.conditions.items()]+
[("red",k,v) for k,v in self.recv_conditions.items()]+
[("orange",k,v) for k,v in self.ioevents.items()]):
for f in v:
for n in f.__code__.co_names+f.__code__.co_consts:
if n in self.states:
l = f.atmt_condname
for x in self.actions[f.atmt_condname]:
l += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k,n,l,c)
for k,v in six.iteritems(self.timeout):
for t,f in v:
if f is None:
continue
for n in f.__code__.co_names+f.__code__.co_consts:
if n in self.states:
l = "%s/%.1fs" % (f.atmt_condname,t)
for x in self.actions[f.atmt_condname]:
l += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k,n,l)
s += "}\n"
return do_graph(s, **kargs)
class Automaton(six.with_metaclass(Automaton_metaclass)):
def parse_args(self, debug=0, store=1, **kargs):
self.debug_level=debug
self.socket_kargs = kargs
self.store_packets = store
def master_filter(self, pkt):
return True
def my_send(self, pkt):
self.send_sock.send(pkt)
## Utility classes and exceptions
class _IO_fdwrapper(SelectableObject):
def __init__(self,rd,wr):
if WINDOWS:
# rd will be used for reading and sending
if isinstance(rd, ObjectPipe):
self.rd = rd
else:
raise OSError("On windows, only instances of ObjectPipe are externally available")
else:
if rd is not None and not isinstance(rd, int):
rd = rd.fileno()
if wr is not None and not isinstance(wr, int):
wr = wr.fileno()
self.rd = rd
self.wr = wr
def fileno(self):
return self.rd
def check_recv(self):
return self.rd.check_recv()
def read(self, n=65535):
if WINDOWS:
return self.rd.recv(n)
return os.read(self.rd, n)
def write(self, msg):
if WINDOWS:
self.rd.send(msg)
return self.call_release()
return os.write(self.wr,msg)
def recv(self, n=65535):
return self.read(n)
def send(self, msg):
return self.write(msg)
class _IO_mixer(SelectableObject):
def __init__(self,rd,wr):
self.rd = rd
self.wr = wr
def fileno(self):
if isinstance(self.rd, int):
return self.rd
return self.rd.fileno()
def check_recv(self):
return self.rd.check_recv()
def recv(self, n=None):
return self.rd.recv(n)
def read(self, n=None):
return self.recv(n)
def send(self, msg):
self.wr.send(msg)
return self.call_release()
def write(self, msg):
return self.send(msg)
class AutomatonException(Exception):
def __init__(self, msg, state=None, result=None):
Exception.__init__(self, msg)
self.state = state
self.result = result
class AutomatonError(AutomatonException):
pass
class ErrorState(AutomatonException):
pass
class Stuck(AutomatonException):
pass
class AutomatonStopped(AutomatonException):
pass
class Breakpoint(AutomatonStopped):
pass
class Singlestep(AutomatonStopped):
pass
class InterceptionPoint(AutomatonStopped):
def __init__(self, msg, state=None, result=None, packet=None):
Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result)
self.packet = packet
class CommandMessage(AutomatonException):
pass
## Services
def debug(self, lvl, msg):
if self.debug_level >= lvl:
log_interactive.debug(msg)
def send(self, pkt):
if self.state.state in self.interception_points:
self.debug(3,"INTERCEPT: packet intercepted: %s" % pkt.summary())
self.intercepted_packet = pkt
cmd = Message(type = _ATMT_Command.INTERCEPT, state=self.state, pkt=pkt)
self.cmdout.send(cmd)
cmd = self.cmdin.recv()
self.intercepted_packet = None
if cmd.type == _ATMT_Command.REJECT:
self.debug(3,"INTERCEPT: packet rejected")
return
elif cmd.type == _ATMT_Command.REPLACE:
pkt = cmd.pkt
self.debug(3,"INTERCEPT: packet replaced by: %s" % pkt.summary())
elif cmd.type == _ATMT_Command.ACCEPT:
self.debug(3,"INTERCEPT: packet accepted")
else:
            raise self.AutomatonError("INTERCEPT: unknown verdict: %r" % cmd.type)
self.my_send(pkt)
self.debug(3,"SENT : %s" % pkt.summary())
if self.store_packets:
self.packets.append(pkt.copy())
## Internals
def __init__(self, *args, **kargs):
external_fd = kargs.pop("external_fd",{})
self.send_sock_class = kargs.pop("ll", conf.L3socket)
self.recv_sock_class = kargs.pop("recvsock", conf.L2listen)
self.started = threading.Lock()
self.threadid = None
self.breakpointed = None
self.breakpoints = set()
self.interception_points = set()
self.intercepted_packet = None
self.debug_level=0
self.init_args=args
self.init_kargs=kargs
self.io = type.__new__(type, "IOnamespace",(),{})
self.oi = type.__new__(type, "IOnamespace",(),{})
self.cmdin = ObjectPipe()
self.cmdout = ObjectPipe()
self.ioin = {}
self.ioout = {}
for n in self.ionames:
extfd = external_fd.get(n)
if not isinstance(extfd, tuple):
extfd = (extfd,extfd)
elif WINDOWS:
raise OSError("Tuples are not allowed as external_fd on windows")
ioin,ioout = extfd
if ioin is None:
ioin = ObjectPipe()
elif not isinstance(ioin, SelectableObject):
ioin = self._IO_fdwrapper(ioin,None)
if ioout is None:
ioout = ioin if WINDOWS else ObjectPipe()
elif not isinstance(ioout, SelectableObject):
ioout = self._IO_fdwrapper(None,ioout)
self.ioin[n] = ioin
self.ioout[n] = ioout
ioin.ioname = n
ioout.ioname = n
setattr(self.io, n, self._IO_mixer(ioout,ioin))
setattr(self.oi, n, self._IO_mixer(ioin,ioout))
for stname in self.states:
setattr(self, stname,
_instance_state(getattr(self, stname)))
self.start()
def __iter__(self):
return self
def __del__(self):
self.stop()
def _run_condition(self, cond, *args, **kargs):
try:
self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname))
cond(self,*args, **kargs)
except ATMT.NewStateRequested as state_req:
self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state))
if cond.atmt_type == ATMT.RECV:
if self.store_packets:
self.packets.append(args[0])
for action in self.actions[cond.atmt_condname]:
self.debug(2, " + Running action [%s]" % action.__name__)
action(self, *state_req.action_args, **state_req.action_kargs)
raise
except Exception as e:
self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e))
raise
else:
self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname))
def _do_start(self, *args, **kargs):
ready = threading.Event()
_t = threading.Thread(target=self._do_control, args=(ready,) + (args), kwargs=kargs)
_t.setDaemon(True)
_t.start()
ready.wait()
def _do_control(self, ready, *args, **kargs):
with self.started:
self.threadid = threading.currentThread().ident
# Update default parameters
a = args+self.init_args[len(args):]
k = self.init_kargs.copy()
k.update(kargs)
self.parse_args(*a,**k)
# Start the automaton
self.state=self.initial_states[0](self)
self.send_sock = self.send_sock_class(**self.socket_kargs)
self.listen_sock = self.recv_sock_class(**self.socket_kargs)
self.packets = PacketList(name="session[%s]"%self.__class__.__name__)
singlestep = True
iterator = self._do_iter()
self.debug(3, "Starting control thread [tid=%i]" % self.threadid)
# Sync threads
ready.set()
try:
while True:
c = self.cmdin.recv()
self.debug(5, "Received command %s" % c.type)
if c.type == _ATMT_Command.RUN:
singlestep = False
elif c.type == _ATMT_Command.NEXT:
singlestep = True
elif c.type == _ATMT_Command.FREEZE:
continue
elif c.type == _ATMT_Command.STOP:
break
while True:
state = next(iterator)
if isinstance(state, self.CommandMessage):
break
elif isinstance(state, self.Breakpoint):
c = Message(type=_ATMT_Command.BREAKPOINT,state=state)
self.cmdout.send(c)
break
if singlestep:
c = Message(type=_ATMT_Command.SINGLESTEP,state=state)
self.cmdout.send(c)
break
except StopIteration as e:
c = Message(type=_ATMT_Command.END, result=e.args[0])
self.cmdout.send(c)
except Exception as e:
exc_info = sys.exc_info()
self.debug(3, "Transfering exception from tid=%i:\n%s"% (self.threadid, traceback.format_exception(*exc_info)))
m = Message(type=_ATMT_Command.EXCEPTION, exception=e, exc_info=exc_info)
self.cmdout.send(m)
self.debug(3, "Stopping control thread (tid=%i)"%self.threadid)
self.threadid = None
def _do_iter(self):
while True:
try:
self.debug(1, "## state=[%s]" % self.state.state)
# Entering a new state. First, call new state function
if self.state.state in self.breakpoints and self.state.state != self.breakpointed:
self.breakpointed = self.state.state
yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state,
state = self.state.state)
self.breakpointed = None
state_output = self.state.run()
if self.state.error:
raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output),
result=state_output, state=self.state.state)
if self.state.final:
raise StopIteration(state_output)
if state_output is None:
state_output = ()
elif not isinstance(state_output, list):
state_output = state_output,
# Then check immediate conditions
for cond in self.conditions[self.state.state]:
self._run_condition(cond, *state_output)
# If still there and no conditions left, we are stuck!
if ( len(self.recv_conditions[self.state.state]) == 0 and
len(self.ioevents[self.state.state]) == 0 and
len(self.timeout[self.state.state]) == 1 ):
raise self.Stuck("stuck in [%s]" % self.state.state,
state=self.state.state, result=state_output)
# Finally listen and pay attention to timeouts
expirations = iter(self.timeout[self.state.state])
next_timeout,timeout_func = next(expirations)
t0 = time.time()
fds = [self.cmdin]
if len(self.recv_conditions[self.state.state]) > 0:
fds.append(self.listen_sock)
for ioev in self.ioevents[self.state.state]:
fds.append(self.ioin[ioev.atmt_ioname])
while True:
t = time.time()-t0
if next_timeout is not None:
if next_timeout <= t:
self._run_condition(timeout_func, *state_output)
next_timeout,timeout_func = next(expirations)
if next_timeout is None:
remain = None
else:
remain = next_timeout-t
self.debug(5, "Select on %r" % fds)
r = select_objects(fds, remain)
self.debug(5, "Selected %r" % r)
for fd in r:
self.debug(5, "Looking at %r" % fd)
if fd == self.cmdin:
yield self.CommandMessage("Received command message")
elif fd == self.listen_sock:
pkt = self.listen_sock.recv(MTU)
if pkt is not None:
if self.master_filter(pkt):
self.debug(3, "RECVD: %s" % pkt.summary())
for rcvcond in self.recv_conditions[self.state.state]:
self._run_condition(rcvcond, pkt, *state_output)
else:
self.debug(4, "FILTR: %s" % pkt.summary())
else:
self.debug(3, "IOEVENT on %s" % fd.ioname)
for ioevt in self.ioevents[self.state.state]:
if ioevt.atmt_ioname == fd.ioname:
self._run_condition(ioevt, fd, *state_output)
except ATMT.NewStateRequested as state_req:
self.debug(2, "switching from [%s] to [%s]" % (self.state.state,state_req.state))
self.state = state_req
yield state_req
## Public API
def add_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt,"atmt_state"):
ipt = ipt.atmt_state
self.interception_points.add(ipt)
def remove_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt,"atmt_state"):
ipt = ipt.atmt_state
self.interception_points.discard(ipt)
def add_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp,"atmt_state"):
bp = bp.atmt_state
self.breakpoints.add(bp)
def remove_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp,"atmt_state"):
bp = bp.atmt_state
self.breakpoints.discard(bp)
def start(self, *args, **kargs):
if not self.started.locked():
self._do_start(*args, **kargs)
def run(self, resume=None, wait=True):
if resume is None:
resume = Message(type = _ATMT_Command.RUN)
self.cmdin.send(resume)
if wait:
try:
c = self.cmdout.recv()
except KeyboardInterrupt:
self.cmdin.send(Message(type = _ATMT_Command.FREEZE))
return
if c.type == _ATMT_Command.END:
return c.result
elif c.type == _ATMT_Command.INTERCEPT:
raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt)
elif c.type == _ATMT_Command.SINGLESTEP:
raise self.Singlestep("singlestep state=[%s]"%c.state.state, state=c.state.state)
elif c.type == _ATMT_Command.BREAKPOINT:
raise self.Breakpoint("breakpoint triggered on state [%s]"%c.state.state, state=c.state.state)
elif c.type == _ATMT_Command.EXCEPTION:
six.reraise(c.exc_info[0], c.exc_info[1], c.exc_info[2])
def runbg(self, resume=None, wait=False):
self.run(resume, wait)
def next(self):
return self.run(resume = Message(type=_ATMT_Command.NEXT))
__next__ = next
def stop(self):
self.cmdin.send(Message(type=_ATMT_Command.STOP))
with self.started:
# Flush command pipes
while True:
r = select_objects([self.cmdin, self.cmdout], 0)
if not r:
break
for fd in r:
fd.recv()
def restart(self, *args, **kargs):
self.stop()
self.start(*args, **kargs)
def accept_packet(self, pkt=None, wait=False):
rsm = Message()
if pkt is None:
rsm.type = _ATMT_Command.ACCEPT
else:
rsm.type = _ATMT_Command.REPLACE
rsm.pkt = pkt
return self.run(resume=rsm, wait=wait)
def reject_packet(self, wait=False):
rsm = Message(type = _ATMT_Command.REJECT)
return self.run(resume=rsm, wait=wait)
| 1 | 11,510 | To avoid a circular import between pcapdnet and automaton | secdev-scapy | py |
@@ -1490,8 +1490,7 @@ class CommandDispatcher:
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
- # pylint: disable=no-member
- # WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
+
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!") | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar, QDialog
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, debug)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
            window: Whether to open in a new window.
            related: If opening a new tab, position the tab as related to the
                     current one (like clicking on a link).
            private: If opening a new window, open it in private browsing mode.
                     If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
count: The tab index to close, or None
"""
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
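            # Temporarily override which neighbouring tab gets selected when
            # this one is removed, then restore the previous behavior.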
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
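            # Everything after the first URL opens in a background tab, so
            # multiple URLs don't overwrite each other.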
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
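        # Heuristic: if the first line of a multi-line input is neither a
        # URL nor an existing path, treat the whole input as a single search
        # query instead of a list of URLs.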
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
tab.printing.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.val.tabs.favicons.show:
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
def tab_give(self, win_id: int = None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
"""
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', deprecated='Use :tab-give instead!')
def tab_detach(self):
"""Deprecated way to detach a tab."""
self.tab_give()
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = url.query()
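        # Some sites use ';' rather than '&' to separate query parameters;
        # detect that so removing ignored parameters keeps the delimiter.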
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
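        # Iterate over a copy of the keys, as removeQueryItem() mutates the
        # query while we loop.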
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# Check to see if we are closing any pinned tabs
if not force:
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i) and tab.data.pinned:
self._tabbed_browser.tab_close_prompt_if_pinned(
tab,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True))
return
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
self._tabbed_browser.close_tab(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open a closed tab."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
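        # A non-numeric index is treated as a pattern: fall back to the best
        # match from the buffer completion model.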
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget(idx-1))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
raise cmdexc.CommandError("buffer: Either a count or the argument "
"index must be specified.")
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.window()
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None, count=None):
"""Select the tab given as argument/[count].
        If neither count nor index is given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
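        # Focusing the already-focused tab jumps back to the last focused
        # one instead (like 'last', but without an error if there is none).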
elif index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
            index: `+` or `-` to move relative to the current tab by
                   [count] positions (default: 1), or an absolute tab
                   index to move to.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
        Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager',
scope='window', window=self._win_id)
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
# pylint: disable=no-member
# WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
def show_source_cb(source):
"""Show source as soon as it's ready."""
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table',
title='Source for {}'.format(current_url.toDisplayString()))
highlighted = pygments.highlight(source, lexer, formatter)
new_tab = self._tabbed_browser.tabopen()
new_tab.set_html(highlighted)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
        The given filter needs to result in exactly one element; otherwise,
        an error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if tab.search.search_displayed:
tab.search.clear()
if not text:
return
options = {
'ignore_case': config.val.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
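        # Advance the remaining results; only the final step gets the
        # callback so the wrap-around message is shown just once.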
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
            count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
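        # Usage sketch (hypothetical invocations, assuming qutebrowser's usual
        # keyword-arg to --flag mapping):
        #   :jseval document.title
        #   :jseval --quiet localStorage.clear()
        #   :jseval --file ~/.config/qutebrowser/snippet.js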
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
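        # Illustrative: ':fake-key <Ctrl-a>x' should parse into two KeyInfo
        # entries, and each one is delivered as a KeyPress immediately
        # followed by a matching KeyRelease, mirroring a real keystroke.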
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
if bg or tab or window or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window)
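        # e.g. with no -t/-b/-w flag and an unchanged URL, the condition above
        # is false and the edit is a no-op: no navigation happens.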
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
if window.isFullScreen():
window.setWindowState(
window.state_before_fullscreen & ~Qt.WindowFullScreen)
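            # Clearing the WindowFullScreen bit from the saved state guards
            # against restoring a snapshot that itself still had the
            # fullscreen flag set.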
else:
window.state_before_fullscreen = window.windowState()
window.showFullScreen()
log.misc.debug('state before fullscreen: {}'.format(
debug.qflags_key(Qt, window.state_before_fullscreen)))
| 1 | 19,434 | Please remove this blank line. | qutebrowser-qutebrowser | py |
@@ -70,6 +70,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, s *v1alpha1.CloudAuditLo
ctx = logging.WithLogger(ctx, c.Logger.With(zap.Any("auditlogsource", s)))
s.Status.InitializeConditions()
+ s.Status.WorkloadIdentityStatus.InitWorkloadIdentityStatus()
s.Status.ObservedGeneration = s.Generation
// If GCP ServiceAccount is provided, reconcile workload identity. | 1 | /*
Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auditlogs
import (
"context"
"cloud.google.com/go/logging/logadmin"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
"github.com/google/knative-gcp/pkg/apis/events/v1alpha1"
cloudauditlogssourcereconciler "github.com/google/knative-gcp/pkg/client/injection/reconciler/events/v1alpha1/cloudauditlogssource"
listers "github.com/google/knative-gcp/pkg/client/listers/events/v1alpha1"
glogadmin "github.com/google/knative-gcp/pkg/gclient/logging/logadmin"
gpubsub "github.com/google/knative-gcp/pkg/gclient/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/events/auditlogs/resources"
"github.com/google/knative-gcp/pkg/reconciler/identity"
pubsubreconciler "github.com/google/knative-gcp/pkg/reconciler/pubsub"
)
const (
resourceGroup = "cloudauditlogssources.events.cloud.google.com"
publisherRole = "roles/pubsub.publisher"
deletePubSubFailed = "PubSubDeleteFailed"
deleteSinkFailed = "SinkDeleteFailed"
deleteWorkloadIdentityFailed = "WorkloadIdentityDeleteFailed"
reconciledFailedReason = "SinkReconcileFailed"
reconciledPubSubFailedReason = "PubSubReconcileFailed"
reconciledSuccessReason = "CloudAuditLogsSourceReconciled"
workloadIdentityFailed = "WorkloadIdentityReconcileFailed"
)
type Reconciler struct {
*pubsubreconciler.PubSubBase
// identity reconciler for reconciling workload identity.
*identity.Identity
auditLogsSourceLister listers.CloudAuditLogsSourceLister
logadminClientProvider glogadmin.CreateFn
pubsubClientProvider gpubsub.CreateFn
// serviceAccountLister for reading serviceAccounts.
serviceAccountLister corev1listers.ServiceAccountLister
}
// Check that our Reconciler implements Interface.
var _ cloudauditlogssourcereconciler.Interface = (*Reconciler)(nil)
func (c *Reconciler) ReconcileKind(ctx context.Context, s *v1alpha1.CloudAuditLogsSource) reconciler.Event {
ctx = logging.WithLogger(ctx, c.Logger.With(zap.Any("auditlogsource", s)))
s.Status.InitializeConditions()
s.Status.ObservedGeneration = s.Generation
// If GCP ServiceAccount is provided, reconcile workload identity.
if s.Spec.ServiceAccount != "" {
if _, err := c.Identity.ReconcileWorkloadIdentity(ctx, s.Spec.Project, s); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, workloadIdentityFailed, "Failed to reconcile CloudAuditLogsSource workload identity: %s", err.Error())
}
}
topic := resources.GenerateTopicName(s)
t, ps, err := c.PubSubBase.ReconcilePubSub(ctx, s, topic, resourceGroup)
if err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, reconciledPubSubFailedReason, "Reconcile PubSub failed with: %s", err.Error())
}
c.Logger.Debugf("Reconciled: PubSub: %+v PullSubscription: %+v", t, ps)
sink, err := c.reconcileSink(ctx, s)
if err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, reconciledFailedReason, "Reconcile Sink failed with: %s", err.Error())
}
s.Status.StackdriverSink = sink
s.Status.MarkSinkReady()
c.Logger.Debugf("Reconciled Stackdriver sink: %+v", sink)
return reconciler.NewEvent(corev1.EventTypeNormal, reconciledSuccessReason, `CloudAuditLogsSource reconciled: "%s/%s"`, s.Namespace, s.Name)
}
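// Note: the reconcile steps above are ordered so that each output feeds the
// next: optional workload identity first, then the PubSub topic and pull
// subscription, and finally the Stackdriver sink that publishes into them.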
func (c *Reconciler) reconcileSink(ctx context.Context, s *v1alpha1.CloudAuditLogsSource) (string, error) {
sink, err := c.ensureSinkCreated(ctx, s)
if err != nil {
s.Status.MarkSinkNotReady("SinkCreateFailed", "failed to ensure creation of logging sink: %s", err.Error())
return "", err
}
err = c.ensureSinkIsPublisher(ctx, s, sink)
if err != nil {
s.Status.MarkSinkNotReady("SinkNotPublisher", "failed to ensure sink has pubsub.publisher permission on source topic: %s", err.Error())
return "", err
}
return sink.ID, nil
}
func (c *Reconciler) ensureSinkCreated(ctx context.Context, s *v1alpha1.CloudAuditLogsSource) (*logadmin.Sink, error) {
sinkID := s.Status.StackdriverSink
if sinkID == "" {
sinkID = resources.GenerateSinkName(s)
}
logadminClient, err := c.logadminClientProvider(ctx, s.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create LogAdmin client", zap.Error(err))
return nil, err
}
sink, err := logadminClient.Sink(ctx, sinkID)
if status.Code(err) == codes.NotFound {
filterBuilder := resources.FilterBuilder{}
filterBuilder.WithServiceName(s.Spec.ServiceName).WithMethodName(s.Spec.MethodName)
if s.Spec.ResourceName != "" {
filterBuilder.WithResourceName(s.Spec.ResourceName)
}
sink = &logadmin.Sink{
ID: sinkID,
Destination: resources.GenerateTopicResourceName(s),
Filter: filterBuilder.GetFilterQuery(),
}
sink, err = logadminClient.CreateSinkOpt(ctx, sink, logadmin.SinkOptions{UniqueWriterIdentity: true})
// Handle AlreadyExists in-case of a race between another create call.
if status.Code(err) == codes.AlreadyExists {
sink, err = logadminClient.Sink(ctx, sinkID)
}
}
return sink, err
}
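// Note: the Sink -> CreateSinkOpt -> Sink sequence above is an idempotent
// create pattern: if two reconcilers race, the loser observes
// codes.AlreadyExists and reads back the winner's sink, so both converge on
// the same logadmin.Sink without surfacing an error.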
// Ensures that the sink has been granted the pubsub.publisher role on the source topic.
func (c *Reconciler) ensureSinkIsPublisher(ctx context.Context, s *v1alpha1.CloudAuditLogsSource, sink *logadmin.Sink) error {
pubsubClient, err := c.pubsubClientProvider(ctx, s.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create PubSub client", zap.Error(err))
return err
}
topicIam := pubsubClient.Topic(s.Status.TopicID).IAM()
topicPolicy, err := topicIam.Policy(ctx)
if err != nil {
return err
}
if !topicPolicy.HasRole(sink.WriterIdentity, publisherRole) {
topicPolicy.Add(sink.WriterIdentity, publisherRole)
if err = topicIam.SetPolicy(ctx, topicPolicy); err != nil {
return err
}
logging.FromContext(ctx).Desugar().Debug(
"Granted the Stackdriver Sink writer identity roles/pubsub.publisher on PubSub Topic.",
zap.String("writerIdentity", sink.WriterIdentity),
zap.String("topicID", s.Status.TopicID))
}
return nil
}
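// The Policy/HasRole/Add/SetPolicy sequence above is a read-modify-write cycle
// on the topic's IAM policy. A minimal sketch of the same pattern against a
// generic *iam.Handle (assumes "cloud.google.com/go/iam"; illustrative only,
// not used by this file):
//
//	func ensureRole(ctx context.Context, h *iam.Handle, member string, role iam.RoleName) error {
//		policy, err := h.Policy(ctx)
//		if err != nil {
//			return err
//		}
//		if policy.HasRole(member, role) {
//			return nil // already granted; skip the redundant write
//		}
//		policy.Add(member, role)
//		return h.SetPolicy(ctx, policy)
//	}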
// deleteSink looks at status.SinkID and if non-empty will delete the
// previously created stackdriver sink.
func (c *Reconciler) deleteSink(ctx context.Context, s *v1alpha1.CloudAuditLogsSource) error {
if s.Status.StackdriverSink == "" {
return nil
}
logadminClient, err := c.logadminClientProvider(ctx, s.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create LogAdmin client", zap.Error(err))
return err
}
if err = logadminClient.DeleteSink(ctx, s.Status.StackdriverSink); status.Code(err) != codes.NotFound {
return err
}
return nil
}
func (c *Reconciler) FinalizeKind(ctx context.Context, s *v1alpha1.CloudAuditLogsSource) reconciler.Event {
// If k8s ServiceAccount exists and it only has one ownerReference, remove the corresponding GCP ServiceAccount iam policy binding.
// No need to delete k8s ServiceAccount, it will be automatically handled by k8s Garbage Collection.
if s.Spec.ServiceAccount != "" {
if err := c.Identity.DeleteWorkloadIdentity(ctx, s.Spec.Project, s); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, deleteWorkloadIdentityFailed, "Failed to delete CloudAuditLogsSource workload identity: %s", err.Error())
}
}
if err := c.deleteSink(ctx, s); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, deleteSinkFailed, "Failed to delete Stackdriver sink: %s", err.Error())
}
if err := c.PubSubBase.DeletePubSub(ctx, s); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, deletePubSubFailed, "Failed to delete CloudAuditLogsSource PubSub: %s", err.Error())
}
s.Status.StackdriverSink = ""
return nil
}
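// Note: finalization tears resources down in roughly the reverse order of
// reconciliation: the workload identity binding first, then the Stackdriver
// sink, then the PubSub topic and pull subscription.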
| 1 | 11,346 | it feels that the WorkloadIdentityCondition should be added to this Status... And if you have workload identity enabled, but not working, then you can mark that condition failed, and also Mark the ready false... not entirely sure though | google-knative-gcp | go |
@@ -1681,6 +1681,16 @@ bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint3
}
}
}
+
+ auto provoking_vertex_state_ci = lvl_find_in_chain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(
+ pPipeline->graphicsPipelineCI.pRasterizationState->pNext);
+ if (provoking_vertex_state_ci &&
+ provoking_vertex_state_ci->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT &&
+ !enabled_features.provoking_vertex_features.provokingVertexLast) {
+ skip |= LogError(
+ device, "VUID-VkPipelineRasterizationProvokingVertexStateCreateInfoEXT-provokingVertexMode-04883",
+ "provokingVertexLast feature is not enabled.");
+ }
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) { | 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Michael Lentine <mlentine@google.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Chia-I Wu <olv@google.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Ian Elliott <ianelliott@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Dustin Graves <dustin@lunarg.com>
* Author: Jeremy Hayes <jeremy@lunarg.com>
* Author: Jon Ashburn <jon@lunarg.com>
* Author: Karl Schultz <karl@lunarg.com>
* Author: Mark Young <marky@lunarg.com>
* Author: Mike Schuchardt <mikes@lunarg.com>
* Author: Mike Weiblen <mikew@lunarg.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: John Zulauf <jzulauf@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
* Author: Tobias Hector <tobias.hector@amd.com>
* Author: Jeremy Gebben <jeremyg@lunarg.com>
*/
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
#include "sync_utils.h"
#include "sync_vuid_maps.h"
// these templates are defined in buffer_validation.cpp so we need to pull in the explicit instantiations from there
extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier *barrier);
extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier2KHR *barrier);
extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc,
const VkRenderPass rp_handle,
const VkImageMemoryBarrier &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc,
const VkRenderPass rp_handle,
const VkImageMemoryBarrier2KHR &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
extern template BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state,
uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) const;
extern template BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(
const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count, const VkBufferMemoryBarrier2KHR *buffer_barriers,
uint32_t image_barrier_count, const VkImageMemoryBarrier2KHR *image_barriers) const;
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
hash_util::HashCombiner hc;
// The set number is integral to the CompatDef's distinctiveness
hc << set << push_constant_ranges.get();
const auto &descriptor_set_layouts = *set_layouts_id.get();
for (uint32_t i = 0; i <= set; i++) {
hc << descriptor_set_layouts[i].get();
}
return hc.Value();
}
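// Only descriptor set layouts [0..set] contribute to the hash above: Vulkan
// defines pipeline layout compatibility for set N solely in terms of the push
// constant ranges and the layouts of sets 0..N, so higher-numbered sets cannot
// affect this CompatDef.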
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
return false;
}
if (set_layouts_id == other.set_layouts_id) {
// if it's the same set_layouts_id, then *any* subset will match
return true;
}
// They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
const auto &descriptor_set_layouts = *set_layouts_id.get();
assert(set < descriptor_set_layouts.size());
const auto &other_ds_layouts = *other.set_layouts_id.get();
assert(set < other_ds_layouts.size());
for (uint32_t i = 0; i <= set; i++) {
if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
return false;
}
}
return true;
}
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::vector;
// The const variant only need the image as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) {
auto it = cb_state->image_layout_map.find(image);
if (it == cb_state->image_layout_map.cend()) {
return nullptr;
}
return &it->second;
}
// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) {
auto &layout_map = cb_state->image_layout_map[image_state.image];
if (!layout_map) {
// Was an empty slot... fill it in.
layout_map.emplace(image_state);
}
return &layout_map;
}
void AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) {
auto *range_map = GetLayoutRangeMap(image_layout_map, image_state);
auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder, image_state.full_range);
for (; range_gen->non_empty(); ++range_gen) {
range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout));
}
}
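// Seeding every subresource range with createInfo.initialLayout gives later
// layout transitions a known baseline to validate against, even before the
// image has been touched by any command buffer.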
// Override base class, we have some extra work to do here
void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) {
if (add_obj) {
ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj);
}
}
// Tracks the number of commands recorded in a command buffer.
void CoreChecks::IncrementCommandCount(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->commandCount++;
}
// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
template <typename T1>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
const VulkanTypedHandle &typed_handle, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid<T1, SimpleErrorLocation>(mem_state, object, typed_handle, {api_name, error_code});
}
template <typename T1, typename LocType>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
const VulkanTypedHandle &typed_handle, const LocType &location) const {
bool result = false;
auto type_name = object_string[typed_handle.type];
if (!mem_state) {
result |= LogError(object, location.Vuid(),
"%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
location.FuncName(), report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
} else if (mem_state->destroyed) {
result |= LogError(object, location.Vuid(),
"%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
"prior to this operation.",
location.FuncName(), report_data->FormatHandle(typed_handle).c_str());
}
return result;
}
// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const Location &loc) const {
using LocationAdapter = core_error::LocationVuidAdapter<sync_vuid_maps::GetImageBarrierVUIDFunctor>;
return ValidateMemoryIsBoundToImage<LocationAdapter>(image_state, LocationAdapter(loc, sync_vuid_maps::ImageError::kNoMemory));
}
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
return ValidateMemoryIsBoundToImage<SimpleErrorLocation>(image_state, SimpleErrorLocation(api_name, error_code));
}
template <typename LocType>
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const LocType &location) const {
bool result = false;
if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
if (image_state->bind_swapchain == VK_NULL_HANDLE) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
result |= LogError(
objlist, location.Vuid(),
"%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
"includes VkBindImageMemorySwapchainInfoKHR.",
location.FuncName(), report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str());
} else if (image_state->create_from_swapchain != image_state->bind_swapchain) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
objlist.add(image_state->bind_swapchain);
result |=
LogError(objlist, location.Vuid(),
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
location.FuncName(), report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(image_state->bind_swapchain).c_str());
}
} else if (image_state->external_ahb) {
// TODO look into how to properly check for a valid bound memory for an external AHB
} else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(image_state->binding.mem_state.get(), image_state->image,
VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), location);
}
return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
const char *error_code) const {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(buffer_state->binding.mem_state.get(), buffer_state->buffer,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV),
api_name, error_code);
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state,
const char *api_name, const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureKHR),
api_name, error_code);
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding->sparse) {
const char *error_code = nullptr;
const char *handle_type = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
handle_type = "BUFFER";
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
handle_type = "IMAGE";
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01045";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01045";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |= LogError(objlist, error_code,
"In %s, attempting to bind %s to %s which was created with sparse memory flags "
"(VK_%s_CREATE_SPARSE_*_BIT).",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
handle_type);
}
const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
if (mem_info) {
const DEVICE_MEMORY_STATE *prev_binding = mem_binding->binding.mem_state.get();
if (prev_binding) {
if (!prev_binding->destroyed) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01044";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01044";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
objlist.add(prev_binding->mem);
skip |=
LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
report_data->FormatHandle(prev_binding->mem).c_str());
} else {
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |=
LogError(objlist, kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind %s to %s which was previous bound to memory that has "
"since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
}
}
}
}
return skip;
}
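// Note: the two rebind errors above encode the rule that non-sparse memory
// bindings are immutable in Vulkan; binding over live memory and binding over
// freed memory are both rejected, just with different diagnostics.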
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const char *error_code, bool optional = false) const {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(device, error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
skip |=
LogError(device, error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
// Validate the specified queue families against the families supported by the physical device that owns this device
bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families,
const char *cmd_name, const char *array_parameter_name,
const char *vuid) const {
bool skip = false;
if (queue_families) {
layer_data::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name,
parameter_name.c_str(), queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(
device, vuid,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name.c_str());
} else if (queue_families[i] >= physical_device_state->queue_family_known_count) {
LogObjectList obj_list(physical_device);
obj_list.add(device);
skip |=
LogError(obj_list, vuid,
"%s: %s (= %" PRIu32
") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.",
cmd_name, parameter_name.c_str(), queue_families[i],
report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str());
}
}
}
}
return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg,
const char *msg_code) const {
if (!(pNode->status & status_mask)) {
        return LogError(pNode->commandBuffer, msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer).c_str(),
fail_msg);
}
return false;
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
const char *msg_code) const {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |=
ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pRasterizationState &&
(pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blendConstantsEnabled) {
result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer",
msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
const auto *line_state =
LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext);
if (line_state && line_state->stippledLineEnable) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer",
msg_code);
}
}
return result;
}
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *msg, const char *caller, const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
return LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
"compatible with %u: %s.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state,
uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
const char *error_code) const {
bool skip = false;
const auto &primary_pass_ci = rp1_state->createInfo;
const auto &secondary_pass_ci = rp2_state->createInfo;
if (primary_pass_ci.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondary_pass_ci.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primary_pass_ci.pAttachments[primary_attach].format != secondary_pass_ci.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different formats.", caller, error_code);
}
if (primary_pass_ci.pAttachments[primary_attach].samples != secondary_pass_ci.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different samples.", caller, error_code);
}
if (primary_pass_ci.pAttachments[primary_attach].flags != secondary_pass_ci.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different flags.", caller, error_code);
}
return skip;
}
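// Out-of-range attachment references are normalized to VK_ATTACHMENT_UNUSED up
// front, so "both unused" trivially passes while a one-sided unused reference
// is reported as an incompatibility.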
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, const char *error_code) const {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < max_input_attachment_count; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < max_color_attachment_count; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
if (rp1_state->createInfo.subpassCount > 1) {
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
// Both renderpasses must agree on Multiview usage
if (primary_desc.viewMask && secondary_desc.viewMask) {
if (primary_desc.viewMask != secondary_desc.viewMask) {
std::stringstream ss;
ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask
<< " while the second has view mask " << secondary_desc.viewMask << ".";
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code);
}
} else if (primary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller,
error_code);
} else if (secondary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller,
error_code);
}
return skip;
}
bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller,
const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string,
report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), msg);
}
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
const char *error_code) const {
bool skip = false;
// createInfo flags must be identical for the renderpasses to be compatible.
if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
skip |=
LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ "
"%s with a flags of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.flags,
type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.flags);
}
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
skip |= LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
"%s with a subpassCount of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(),
rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
// Find an entry of the Fragment Density Map type in the pNext chain, if it exists
const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext);
const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext);
// Both renderpasses must agree on usage of a Fragment Density Map type
if (fdm1 && fdm2) {
uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment;
uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment;
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
} else if (fdm1) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses a Fragment Density Map while the second one does not.", caller, error_code);
} else if (fdm2) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses a Fragment Density Map while the first one does not.", caller, error_code);
}
return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
static void ListBits(std::ostream &s, uint32_t bits) {
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
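// e.g. ListBits(s, 0x0B) appends "0,1,3": the zero-based indices of the set
// bits, lowest first, comma-separated.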
std::string DynamicStateString(CBStatusFlags input_value) {
std::string ret;
int index = 0;
while (input_value) {
if (input_value & 1) {
if (!ret.empty()) ret.append("|");
ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1 << index))));
}
++index;
input_value >>= 1;
}
if (ret.empty()) ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0))));
return ret;
}
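// The result joins the VkDynamicState name of every set bit with '|'; an
// all-zero input falls through to the single name that ConvertToDynamicState
// maps to bit value 0.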
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline, const char *caller) const {
bool skip = false;
const auto ¤t_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
// Verify vertex & index buffer for unprotected command buffer.
    // Because vertex & index buffers are read-only, they don't need to handle the protected command buffer case.
if (enabled_features.core11.protectedMemory == VK_TRUE) {
for (const auto &buffer_binding : current_vtx_bfr_binding_info) {
if (buffer_binding.buffer_state && !buffer_binding.buffer_state->destroyed) {
skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer,
"Buffer is vertex buffer");
}
}
if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->destroyed) {
skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller,
vuid.unprotected_command_buffer, "Buffer is index buffer");
}
}
// Verify if using dynamic state setting commands that it doesn't set up in pipeline
CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status);
if (invalid_status) {
std::string dynamic_states = DynamicStateString(invalid_status);
LogObjectList objlist(pCB->commandBuffer);
objlist.add(pPipeline->pipeline);
skip |= LogError(objlist, vuid.dynamic_state_setting_commands,
"%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller,
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), dynamic_states.c_str());
}
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding,
"%s: %s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i,
vertex_binding);
} else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) &&
!enabled_features.robustness2_features.nullDescriptor) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding_null,
"%s: Vertex binding %d must not be VK_NULL_HANDLE %s expects that this Command Buffer's vertex "
"binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
((current_vtx_bfr_binding_info[vertex_binding].buffer_state) ||
enabled_features.robustness2_features.nullDescriptor)) {
auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) {
vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride);
uint32_t attribute_binding_extent =
attribute_description.offset + FormatElementSize(attribute_description.format);
if (vertex_buffer_stride < attribute_binding_extent) {
skip |=
LogError(pCB->commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363",
"The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is less than "
"the extent of the binding for attribute %u (%u).",
vertex_binding, vertex_buffer_stride, i, attribute_binding_extent);
}
}
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
// Use 1 as vertex/instance index to use buffer stride as well
const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer);
objlist.add(state.pipeline_state->pipeline);
skip |= LogError(
objlist, vuid.vertex_binding_attribute,
"%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
", %s,from of %s and vertex %s.",
caller, i, string_VkFormat(attribute_description.format),
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer).c_str());
}
} else {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(state.pipeline_state->pipeline);
skip |= LogError(objlist, vuid.vertex_binding_attribute,
"%s: binding #%" PRIu32
" in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled, if there is no viewport, or if viewport/scissors are being inherited.
bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState &&
pCB->inheritedViewportDepths.size() == 0) {
bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
// NB (akeley98): Current validation layers do not detect the error where vkCmdSetViewport (or scissor) was called, but
// the dynamic state set is overwritten by binding a graphics pipeline with static viewport (scissor) state.
        // This condition can be detected by checking whether trashedViewportMask & viewportMask (trashedScissorMask & scissorMask) is
// nonzero in the range of bits needed by the pipeline.
if (dyn_viewport) {
const auto required_viewports_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport(s) ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
if (dyn_scissor) {
const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor(s) ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
// VUID {refpage}-viewportCount-03417
if (dyn_viewport_count && !dyn_scissor_count) {
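            // viewportCount is dynamic here, so the pipeline's static scissorCount dictates how
            // many viewports vkCmdSetViewportWithCountEXT() must have provided.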
const auto required_viewport_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport with count ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT().";
skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-scissorCount-03418
if (dyn_scissor_count && !dyn_viewport_count) {
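            // scissorCount is dynamic here, so the pipeline's static viewportCount dictates how
            // many scissors vkCmdSetScissorWithCountEXT() must have provided.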
const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor with count ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-viewportCount-03419
if (dyn_scissor_count && dyn_viewport_count) {
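            // With both counts dynamic they must be set as a matching pair: the XOR of the two
            // with-count masks yields exactly the indices set by one call but not the other.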
if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport and scissor with count ";
ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask);
ss << " are used by pipeline state object, but were not provided via matching calls to "
"vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str());
}
}
}
// If inheriting viewports, verify that not using more than inherited.
if (pCB->inheritedViewportDepths.size() != 0 && dyn_viewport) {
uint32_t viewport_count = pPipeline->graphicsPipelineCI.pViewportState->viewportCount;
uint32_t max_inherited = uint32_t(pCB->inheritedViewportDepths.size());
if (viewport_count > max_inherited) {
skip |= LogError(device, vuid.dynamic_state,
"Pipeline requires more viewports (%u) than inherited (viewportDepthCount=%u).",
unsigned(viewport_count), unsigned(max_inherited));
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
}
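            // subpass_num_samples now holds the union of the sample-count bits of every attachment
            // the subpass uses; absent the mixed-samples extensions, the pipeline's single
            // rasterizationSamples bit must account for all of them.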
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
LogObjectList objlist(pPipeline->pipeline);
objlist.add(pCB->activeRenderPass->renderPass);
skip |=
LogError(objlist, vuid.rasterization_samples,
"%s: In %s the sample count is %s while the current %s has %s and they need to be the same.", caller,
report_data->FormatHandle(pPipeline->pipeline).c_str(), string_VkSampleCountFlagBits(pso_num_samples),
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
string_VkSampleCountFlags(static_cast<VkSampleCountFlags>(subpass_num_samples)).c_str());
}
} else {
skip |= LogError(pPipeline->pipeline, kVUID_Core_DrawState_NoActiveRenderpass,
"%s: No active render pass found at draw-time in %s!", caller,
report_data->FormatHandle(pPipeline->pipeline).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object",
pPipeline->rp_state.get(), caller, vuid.render_pass_compatible);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
LogError(pPipeline->pipeline, vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.",
caller, pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
// Check if depth stencil attachment was created with sample location compatible bit
if (pPipeline->sample_location_enabled == VK_TRUE) {
const safe_VkAttachmentReference2 *ds_attachment =
pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment;
const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get();
if ((ds_attachment != nullptr) && (fb_state != nullptr)) {
const uint32_t attachment = ds_attachment->attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
const auto *imageview_state = GetActiveAttachmentImageViewState(pCB, attachment);
if (imageview_state != nullptr) {
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (image_state != nullptr) {
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) {
skip |= LogError(pPipeline->pipeline, vuid.sample_location,
"%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth "
"stencil attachment's VkImage was not created with "
"VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.",
caller, pCB->activeSubpass);
}
}
}
}
}
}
}
skip |= ValidateStatus(pCB, CBSTATUS_PATCH_CONTROL_POINTS_SET, "Dynamic patch control points not set for this command buffer",
vuid.patch_control_points);
skip |= ValidateStatus(pCB, CBSTATUS_RASTERIZER_DISCARD_ENABLE_SET,
"Dynamic rasterizer discard enable not set for this command buffer", vuid.rasterizer_discard_enable);
skip |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_ENABLE_SET, "Dynamic depth bias enable not set for this command buffer",
vuid.depth_bias_enable);
skip |= ValidateStatus(pCB, CBSTATUS_LOGIC_OP_SET, "Dynamic state logicOp not set for this command buffer", vuid.logic_op);
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_RESTART_ENABLE_SET,
"Dynamic primitive restart enable not set for this command buffer", vuid.primitive_restart_enable);
// VUID {refpage}-primitiveTopology-03420
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command buffer",
vuid.primitive_topology);
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) {
bool compatible_topology = false;
switch (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
default:
break;
}
if (!compatible_topology) {
skip |= LogError(pPipeline->pipeline, vuid.primitive_topology,
"%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is "
"not compatible with the pipeline topology %s.",
caller, string_VkPrimitiveTopology(pCB->primitiveTopology),
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid);
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream error_str;
        error_str << report_data->FormatHandle(pipeline_layout->layout) << " only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = error_str.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex].get();
return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg);
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function) const {
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const auto lv_bind_point = ConvertToLvlBindPoint(bind_point);
const auto &state = cb_node->lastBound[lv_bind_point];
const auto *pipe = state.pipeline_state;
if (nullptr == pipe) {
return LogError(cb_node->commandBuffer, vuid.pipeline_bound,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR
? "RayTracing"
: bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
// First check flag states
result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state);
if (cb_node->activeRenderPass && cb_node->activeFramebuffer) {
// Verify attachments for unprotected/protected command buffer.
if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) {
uint32_t i = 0;
for (const auto &view_state : *cb_node->active_attachments.get()) {
const auto &subpass = cb_node->active_subpasses->at(i);
if (subpass.used && view_state && !view_state->destroyed) {
std::string image_desc = "Image is ";
image_desc.append(string_VkImageUsageFlagBits(subpass.usage));
                        // Input attachments are read-only, so the protected command buffer case does not apply to them.
                        // Some CMD_TYPEs cannot be protected; see VUID 02711.
if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT &&
vuid.protected_command_buffer != kVUIDUndefined) {
result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function,
vuid.protected_command_buffer, image_desc.c_str());
}
result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function,
vuid.unprotected_command_buffer, image_desc.c_str());
}
++i;
}
}
}
}
// Now complete other state checks
string error_string;
auto const &pipeline_layout = pipe->pipeline_layout.get();
// Check if the current pipeline is compatible for the maximum used set with the bound sets.
if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
LogObjectList objlist(pipe->pipeline);
objlist.add(pipeline_layout->layout);
objlist.add(state.pipeline_layout);
result |= LogError(objlist, vuid.compatible_pipeline,
"%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
" with bound descriptor sets, last bound with %s",
CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str(), pipe->max_active_slot,
report_data->FormatHandle(state.pipeline_layout).c_str());
}
for (const auto &set_binding_pair : pipe->active_slots) {
uint32_t set_index = set_binding_pair.first;
        // If a valid set is not bound, flag an error
if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) {
result |= LogError(cb_node->commandBuffer, kVUID_Core_DrawState_DescriptorSetNotBound,
"%s(): %s uses set #%u but that set is not bound.", CommandTypeString(cmd_type),
report_data->FormatHandle(pipe->pipeline).c_str(), set_index);
} else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout,
set_index, error_string)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet();
LogObjectList objlist(set_handle);
objlist.add(pipeline_layout->layout);
result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s(): %s bound as set #%u is not compatible with overlapping %s due to: %s",
CommandTypeString(cmd_type), report_data->FormatHandle(set_handle).c_str(), set_index,
report_data->FormatHandle(pipeline_layout->layout).c_str(), error_string.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
                // for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[set_index].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[set_index].validated_set != descriptor_set ||
state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled[image_layout_validation] &&
state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
bool need_validate = descriptor_set_changed ||
// Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_validate) {
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only validate the bindings that haven't already been validated
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
layer_data::insert_iterator<BindingReqMap>(delta_reqs, delta_reqs.begin()));
result |=
ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function, vuid);
} else {
result |=
ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function, vuid);
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe, function);
}
// Verify if push constants have been set
// NOTE: Currently not checking whether active push constants are compatible with the active pipeline, nor whether the
// "life times" of push constants are correct.
// Discussion on validity of these checks can be found at https://gitlab.khronos.org/vulkan/vulkan/-/issues/2602.
if (!cb_node->push_constant_data_ranges || (pipeline_layout->push_constant_ranges == cb_node->push_constant_data_ranges)) {
for (const auto &stage : pipe->stage_state) {
const auto *entrypoint =
stage.shader_state.get()->FindEntrypointStruct(stage.entry_point_name.c_str(), stage.stage_flag);
if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) {
continue;
}
            // Edge case where the shader uses push constants statically but vkCmdPushConstants was never called
if (!cb_node->push_constant_data_ranges) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(pipeline_layout->layout);
objlist.add(pipe->pipeline);
result |= LogError(objlist, vuid.push_constants_set,
"%s(): Shader in %s uses push-constant statically but vkCmdPushConstants was not called yet for "
"pipeline layout %s.",
CommandTypeString(cmd_type), string_VkShaderStageFlags(stage.stage_flag).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str());
}
const auto it = cb_node->push_constant_data_update.find(stage.stage_flag);
if (it == cb_node->push_constant_data_update.end()) {
// This error has been printed in ValidatePushConstantUsage.
break;
}
}
}
return result;
}
bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (!((pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
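            // (handle provided) XOR (index provided) is true only when exactly one selector is
            // given; the negation therefore catches both "neither" and "both".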
// TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and
// TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified",
pipelineIndex);
} else if (pipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |=
LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.",
pipelineIndex);
} else {
base_pipeline = pPipelines[pipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->graphicsPipelineCI.basePipelineHandle);
}
if (base_pipeline && !(base_pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex);
}
}
// Check for portability errors
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) &&
(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452",
"Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not supported",
pipelineIndex);
}
// Validate vertex inputs
for (const auto &desc : pipeline->vertex_binding_descriptions_) {
if ((desc.stride < phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) ||
((desc.stride % phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) != 0)) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDescription-stride-04456",
"Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a "
"multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.",
pipelineIndex);
}
}
// Validate vertex attributes
if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) {
for (const auto &attrib : pipeline->vertex_attribute_descriptions_) {
const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding);
if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) {
const auto& desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second];
if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) {
skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457",
"Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + "
"sizeof(vertex_description.format)) is larger than the vertex stride",
pipelineIndex);
}
}
}
}
// Validate polygon mode
auto raster_state_ci = pipeline->graphicsPipelineCI.pRasterizationState;
if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci &&
(VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) {
skip |=
LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458",
"Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported", pipelineIndex);
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo[%u] State: Subpass index %u is out of range for this renderpass (0..%u).",
pipelineIndex, pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
subpass_desc = nullptr;
}
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(),
pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const attachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]),
sizeof(attachments[0]))) {
skip |=
LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo[%u]: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.",
pipelineIndex);
break;
}
}
}
}
if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo[%u]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.",
pipelineIndex);
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
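        // Walk each graphics stage bit by left-shifting from VK_SHADER_STAGE_VERTEX_BIT;
        // duplicate_shaders has a bit set for every stage that was supplied more than once.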
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726",
"Invalid Pipeline CreateInfo[%u] State: Multiple shaders provided for stage %s", pipelineIndex,
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader or Mesh Shader required.", pipelineIndex);
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo[%u] State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).",
pipelineIndex);
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader required.", pipelineIndex);
}
}
if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo[%u] State: Mesh Shader not supported.", pipelineIndex);
}
if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo[%u] State: Task Shader not supported.", pipelineIndex);
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
if (!has_control && has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo[%u] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex);
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo[%u] State: Missing pInputAssemblyState.", pipelineIndex);
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.",
pipelineIndex);
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState) {
if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.",
pipelineIndex);
}
}
if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(
device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and geometry shaders feature is not enabled. "
"It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and tessellation shaders feature is not "
"enabled. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!enabled_features.core.depthClamp)) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthClamp device feature is disabled: the "
"depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBiasClamp device feature is disabled: the "
"depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled",
pipelineIndex);
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
(!enabled_features.core.alphaToOne)) {
skip |= LogError(
device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo[%u] State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.",
pipelineIndex);
} else if (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) {
if (!enabled_features.core.depthBounds) {
skip |=
LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.",
pipelineIndex);
}
                        // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs
if (!device_extensions.vk_ext_depth_range_unrestricted &&
!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
const float minDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->minDepthBounds;
const float maxDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->maxDepthBounds;
// Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755
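                        // Negated comparisons are used so a NaN bound also fails the range check
                        // (NaN compares false against everything).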
if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.",
                                         pipelineIndex, minDepthBounds);
}
if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.",
                                         pipelineIndex, maxDepthBounds);
}
}
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo[%u] State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.",
pipelineIndex);
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo[%u] State: Missing pVertexInputState.", pipelineIndex);
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipeline->graphicsPipelineCI.pMultisampleState;
auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) {
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accum_color_samples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
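            // A non-power-of-two accumulated value means the attachments disagree on sample
            // count, which is likewise invalid without the mixed-samples extensions.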
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
if (device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(max_sample_count != static_cast<VkSampleCountFlagBits>(0)) &&
(multisample_state->rasterizationSamples != max_sample_count)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
if (device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
const bool ds_test_enabled =
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (multisample_state) {
if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable == VK_TRUE)) {
skip |=
LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
const auto *coverage_modulation_state =
LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
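                        // With modulation enabled the table needs exactly one entry per color
                        // sample covered by a raster sample, i.e. rasterizationSamples / colorSamples.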
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |= LogError(
device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
if (device_extensions.vk_nv_coverage_reduction_mode) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
uint32_t subpass_depth_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
if (multisample_state && IsPowerOfTwo(subpass_color_samples) &&
(subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) {
const auto *coverage_reduction_state =
LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext);
if (coverage_reduction_state) {
const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode;
uint32_t combination_count = 0;
std::vector<VkFramebufferMixedSamplesCombinationNV> combinations;
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
nullptr);
combinations.resize(combination_count);
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
&combinations[0]);
bool combination_found = false;
for (const auto &combination : combinations) {
if (coverage_reduction_mode == combination.coverageReductionMode &&
raster_samples == combination.rasterizationSamples &&
subpass_depth_samples == combination.depthStencilSamples &&
subpass_color_samples == combination.colorSamples) {
combination_found = true;
break;
}
}
if (!combination_found) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722",
"vkCreateGraphicsPipelines: pCreateInfos[%d] the specified combination of coverage "
"reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for "
"the subpass color and depth/stencil attachments is not a valid combination returned by "
"vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.",
pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode));
}
}
}
}
if (device_extensions.vk_nv_fragment_coverage_to_color) {
            const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state->pNext);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto& color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto& color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
std::ostringstream str;
str << "references an attachment with an invalid format ("
<< string_VkFormat(color_attachment.format) << ").";
error_detail = str.str();
break;
}
} else {
std::ostringstream str;
str << "references an invalid attachment. The subpass pColorAttachments["
<< coverage_to_color_state->coverageToColorLocation
<< "].attachment has the value VK_ATTACHMENT_UNUSED.";
error_detail = str.str();
}
} else {
std::ostringstream str;
str << "references an non-existing attachment since the subpass colorAttachmentCount is "
<< subpass_desc->colorAttachmentCount << ".";
error_detail = str.str();
}
if (!attachment_is_valid) {
skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
if (device_extensions.vk_ext_sample_locations) {
const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
if (sample_location_state != nullptr) {
if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;
auto multisample_prop = LvlInitStruct<VkMultisamplePropertiesEXT>();
DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
&multisample_prop);
const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;
// Note order or "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).",
pipelineIndex, grid_size.width, max_grid_size.width);
}
if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).",
pipelineIndex, grid_size.height, max_grid_size.height);
}
if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must "
"be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).",
pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
}
}
}
skip |= ValidatePipelineCacheControlFlags(pPipeline->graphicsPipelineCI.flags, pipelineIndex, "vkCreateGraphicsPipelines",
"VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878");
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378",
"vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState feature is not enabled");
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2 &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT))) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868",
"vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState2 feature is not enabled");
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_LOGIC_OP_EXT)) {
        skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869",
                         "vkCreateGraphicsPipelines: Extended dynamic state is used, but the extendedDynamicState2LogicOp "
                         "feature is not enabled");
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870",
"vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState2PatchControlPoints "
"feature is not enabled");
}
const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state =
LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(pPipeline->graphicsPipelineCI.pNext);
if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) {
const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR";
if (fragment_shading_rate_state->fragmentSize.width == 0) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494",
"vkCreateGraphicsPipelines: Fragment width of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height == 0) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495",
"vkCreateGraphicsPipelines: Fragment height of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496",
"vkCreateGraphicsPipelines: Non-power-of-two fragment width of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497",
"vkCreateGraphicsPipelines: Non-power-of-two fragment height of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498",
"vkCreateGraphicsPipelines: Fragment width of %u specified in %s is too large.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499",
"vkCreateGraphicsPipelines: Fragment height of %u specified in %s is too large",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.width != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines: Pipeline fragment width of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.height != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines: Pipeline fragment height of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501",
"vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but "
"primitiveFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name);
}
if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502",
"vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but "
"attachmentFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name);
}
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807
if (!enabled_features.vertex_input_dynamic_state_features.vertexInputDynamicState &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807",
"The vertexInputDynamicState feature must be enabled to use the VK_DYNAMIC_STATE_VERTEX_INPUT_EXT dynamic state");
}
return skip;
}
// The block of code starting here is specifically for managing/tracking descriptor sets (DSs)
// Validate that the given set is valid and that it's not being used by an in-flight command buffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if a validation error occurs and the callback returns true (to skip the upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const {
if (disabled[idle_descriptor_set]) return false;
bool skip = false;
auto set_node = setMap.find(set);
if (set_node != setMap.end()) {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->in_use.load()) {
skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on %s that is in use by a command buffer.", func_str,
report_data->FormatHandle(set).c_str());
}
}
return skip;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_ENDRENDERPASS2)) {
skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
const char *error_code) const {
auto pool = cb_node->command_pool.get();
if (pool) {
const uint32_t queue_family_index = pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT,
VK_QUEUE_PROTECTED_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
            return LogError(cb_node->commandBuffer, error_code,
                            "%s(): Called in command buffer %s, which was allocated from command pool %s. That pool was "
                            "created with queueFamilyIndex %u, which does not include the required %s capability flags.",
                            caller_name, report_data->FormatHandle(cb_node->commandBuffer).c_str(),
                            report_data->FormatHandle(pool->commandPool).c_str(), queue_family_index,
                            required_flags_string.c_str());
}
}
return false;
}
bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const {
bool skip = false;
const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel;
const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width *
pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count);
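    // Illustrative example: a 2x2 grid at VK_SAMPLE_COUNT_4_BIT requires 2 * 2 * 4 = 16 entries in pSampleLocations.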
if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527",
"%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel "
"sample rate which currently is (%u * %u * %u).",
apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width,
pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count));
}
if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526",
"%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check "
"VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.",
apiName, string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
static char const *GetCauseStr(VulkanTypedHandle obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (const auto &obj : cb_state->broken_bindings) {
const char *cause_str = GetCauseStr(obj);
string vuid;
std::ostringstream str;
str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type];
vuid = str.str();
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(obj);
skip |=
LogError(objlist, vuid, "You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type,
const char *caller_name) const {
bool skip = false;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
if ((cb_state != nullptr) && (buffer_state != nullptr)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory);
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit,
caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT");
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuid.indirect_protected_cb,
"%s: Indirect commands can't be used in protected command buffers.", caller_name);
}
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
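    // A device mask is valid only if it uses bits for existing physical devices. For example (illustrative),
    // with physical_device_count == 2 the valid masks are 0x1 through 0x3; (1 << 2) == 4 is the first invalid value.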
uint32_t count = 1 << physical_device_count;
if (count <= deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
physical_device_count);
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
if (deviceMask == 0) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
return skip;
}
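// Every bit set in deviceMask must also be set in the command buffer's initial device mask
// (typically established via VkDeviceGroupCommandBufferBeginInfo when recording begins).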
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object,
const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
skip |= LogError(pCB->commandBuffer, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
pCB->active_render_pass_device_mask);
}
return skip;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool inside = false;
if (pCB->activeRenderPass) {
inside = LogError(pCB->commandBuffer, msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = LogError(pCB->commandBuffer, msgCode, "%s: This call must be issued inside an active render pass.", apiName);
}
return outside;
}
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
bool skip = false;
if (requested_queue_family >= pd_state->queue_family_known_count) {
const char *conditional_ext_cmd =
instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
skip |= LogError(pd_state->phys_device, err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
std::to_string(pd_state->queue_family_known_count).c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
const VkDeviceQueueCreateInfo *infos) const {
bool skip = false;
layer_data::unordered_set<uint32_t> queue_family_set;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice", queue_family_var_name.c_str());
if (queue_family_set.insert(requested_queue_family).second == false) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
queue_family_var_name.c_str(), requested_queue_family);
}
// Verify that requested queue count of queue family is known to be valid at this point in time
if (requested_queue_family < pd_state->queue_family_known_count) {
const auto requested_queue_count = infos[i].queueCount;
const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
// spec guarantees at least one queue for each queue family
const uint32_t available_queue_count =
queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
if (requested_queue_count > available_queue_count) {
const std::string count_note =
queue_family_has_props
? "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
: "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
skip |= LogError(
pd_state->phys_device, "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
bool skip = false;
auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
if (!pd_state) {
skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else {
skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features =
LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext);
if (fragment_shading_rate_features) {
const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features =
LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) {
if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
skip |= LogError(
pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04478",
"vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously.");
}
if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
skip |= LogError(
pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04479",
"vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously.");
}
if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04480",
"vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features "
"simultaneously.");
}
}
const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features =
LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext);
if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) {
if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481",
"vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features "
"simultaneously.");
}
if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482",
"vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features "
"simultaneously.");
}
if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483",
"vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features "
"simultaneously.");
}
}
}
}
return skip;
}
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
// would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
core_checks->SetSetImageViewInitialLayoutCallback(
[core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout);
});
}
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
imageLayoutMap.clear();
StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
}
bool CoreChecks::ValidateStageMaskHost(const Location &loc, VkPipelineStageFlags2KHR stageMask) const {
bool skip = false;
if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) {
const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, sync_vuid_maps::SubmitError::kHostStageMask);
skip |= LogError(
device, vuid,
"%s stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.",
loc.Message().c_str());
}
return skip;
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const {
bool skip = false;
// sequence number we want to validate up to, per queue
layer_data::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
layer_data::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs;
std::vector<const QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const {
auto fence_state = GetFenceState(fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const Location &loc, const CMD_BUFFER_STATE *pCB,
int current_submit_count) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, SubmitError::kCmdNotSimultaneous);
skip |= LogError(device, vuid, "%s %s is already in use and is not marked for simultaneous use.", loc.Message().c_str(),
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
return skip;
}
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
const char *vu_id) const {
bool skip = false;
if (disabled[command_buffer_state]) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
"times.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(cb_state, call_source);
break;
case CB_NEW:
skip |= LogError(cb_state->commandBuffer, vu_id, "%s used in the call to %s is unrecorded and contains no commands.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
case CB_RECORDING:
skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on %s before this call to %s!",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex,
uint32_t count, const uint32_t *indices) const {
bool found = false;
bool skip = false;
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(object);
skip = LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626",
"vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
"this queue family %d.",
report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(),
queueFamilyIndex);
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(const Location &loc, const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
auto pool = pCB->command_pool.get();
auto queue_state = GetQueueState(queue);
if (pool && queue_state) {
if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(queue);
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdWrongQueueFamily);
skip |= LogError(objlist, vuid,
"%s Primary %s created in queue family %d is being submitted on %s "
"from queue family %d.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str(), pool->queueFamilyIndex,
report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (const auto &object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>());
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>());
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
bool CoreChecks::ValidatePrimaryCommandBufferState(
const Location &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count,
QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdInSubmit);
skip |= LogError(pCB->commandBuffer, vuid, "%s Command buffer %s must be allocated with VK_COMMAND_BUFFER_LEVEL_PRIMARY.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str());
} else {
for (const auto *sub_cb : pCB->linkedCommandBuffers) {
skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer) &&
!(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
LogObjectList objlist(device);
objlist.add(pCB->commandBuffer);
objlist.add(sub_cb->commandBuffer);
objlist.add(sub_cb->primaryCommandBuffer);
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdNotSimultaneous);
skip |= LogError(objlist, vuid,
"%s %s was submitted with secondary %s but that buffer has subsequently been bound to "
"primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str());
}
}
}
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
skip |= ValidateCommandBufferSimultaneousUse(loc, pCB, current_submit_count);
skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
const char *vuid = loc.function == Func::vkQueueSubmit ? "VUID-vkQueueSubmit-pCommandBuffers-00072"
: "VUID-vkQueueSubmit2KHR-commandBuffer-03876";
skip |= ValidateCommandBufferState(pCB, loc.StringFunc().c_str(), current_submit_count, vuid);
return skip;
}
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid,
const char *func_name) const {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
skip |= LogError(pFence->fence, inflight_vuid, "%s: %s is already in use by another submission.", func_name,
report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
skip |= LogError(pFence->fence, retired_vuid,
"%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
report_data->FormatHandle(pFence->fence).c_str());
}
}
return skip;
}
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondary_cmd_buffer);
RecordQueuedQFOTransfers(secondary_cmd_buffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
void CoreChecks::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);
if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
auto cb_node = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
if (cb_node) {
                for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                    UpdateCmdBufImageLayouts(secondary_cmd_buffer);
                    RecordQueuedQFOTransfers(secondary_cmd_buffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const {
for (auto &pair : queueMap) {
const QUEUE_STATE &queue_state = pair.second;
for (const auto &submission : queue_state.submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == semaphore) {
return true;
}
}
}
}
return false;
}
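// Tracks the expected signaled/unsignaled state of binary semaphores across all batches of a single queue
// submission, so wait-before-signal and double-signal errors can be caught both within the call and against
// previously submitted, still-pending work.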
struct SemaphoreSubmitState {
const CoreChecks *core;
VkQueueFlags queue_flags;
layer_data::unordered_set<VkSemaphore> signaled_semaphores;
layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
layer_data::unordered_set<VkSemaphore> internal_semaphores;
SemaphoreSubmitState(const CoreChecks *core_, VkQueueFlags queue_flags_) : core(core_), queue_flags(queue_flags_) {}
bool ValidateWaitSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                               uint32_t device_index) {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
LogObjectList objlist(semaphore);
objlist.add(queue);
const auto *pSemaphore = core->GetSemaphoreState(semaphore);
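        // A binary wait is only satisfiable if the semaphore is currently signaled, was signaled earlier in this
        // submission, or has a signal operation still pending on some queue.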
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !core->SemaphoreWasSignaled(semaphore))) {
                auto error = core->device_extensions.vk_khr_timeline_semaphore ? SubmitError::kTimelineCannotBeSignalled
                                                                               : SubmitError::kBinaryCannotBeSignalled;
const auto &vuid = GetQueueSubmitVUID(loc, error);
skip |= core->LogError(
objlist, pSemaphore->scope == kSyncScopeInternal ? vuid : kVUID_Core_DrawState_QueueForwardProgress,
"%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(),
core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
return skip;
}
bool ValidateSignalSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
uint32_t deviceIndex) {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
LogObjectList objlist(semaphore);
objlist.add(queue);
const auto *pSemaphore = core->GetSemaphoreState(semaphore);
        if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && value <= pSemaphore->payload) {
            const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemSmallValue);
            skip |= core->LogError(objlist, vuid,
                                   "%s signal value (0x%" PRIx64
                                   ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ")",
                                   loc.Message().c_str(), value, core->report_data->FormatHandle(queue).c_str(),
                                   core->report_data->FormatHandle(semaphore).c_str(), pSemaphore->payload);
        }
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
objlist.add(pSemaphore->signaler.first);
skip |= core->LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s (%s) that was previously "
"signaled by %s but has not since been waited on by any queue.",
loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(),
core->report_data->FormatHandle(semaphore).c_str(),
core->report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
return skip;
}
};
bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo *submit,
const Location &outer_loc) const {
bool skip = false;
auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
uint64_t value = 0;
uint32_t device_index = 0; // TODO:
VkSemaphore semaphore = submit->pWaitSemaphores[i];
LogObjectList objlist(semaphore);
objlist.add(queue);
if (submit->pWaitDstStageMask) {
auto loc = outer_loc.dot(Field::pWaitDstStageMask, i);
skip |= ValidatePipelineStage(objlist, loc, state.queue_flags, submit->pWaitDstStageMask[i]);
skip |= ValidateStageMaskHost(loc, submit->pWaitDstStageMask[i]);
}
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (!semaphore_state) {
continue;
}
auto loc = outer_loc.dot(Field::pWaitSemaphores, i);
if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
if (timeline_semaphore_submit_info == nullptr) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"%s (%s) is a timeline semaphore, but VkSubmitInfo does "
"not include an instance of VkTimelineSemaphoreSubmitInfo",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
continue;
} else if (submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
"%s (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than "
"waitSemaphoreCount (%u)",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, submit->waitSemaphoreCount);
continue;
}
value = timeline_semaphore_submit_info->pWaitSemaphoreValues[i];
}
        skip |= state.ValidateWaitSemaphore(loc, queue, semaphore, value, device_index);
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
uint64_t value = 0;
uint32_t device_index = 0;
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (!semaphore_state) {
continue;
}
auto loc = outer_loc.dot(Field::pSignalSemaphores, i);
if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
if (timeline_semaphore_submit_info == nullptr) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"%s (%s) is a timeline semaphore, but VkSubmitInfo"
"does not include an instance of VkTimelineSemaphoreSubmitInfo",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
continue;
} else if (submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
"%s (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than "
"signalSemaphoreCount (%u)",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, submit->signalSemaphoreCount);
continue;
}
value = timeline_semaphore_submit_info->pSignalSemaphoreValues[i];
}
skip |= state.ValidateSignalSemaphore(loc, queue, semaphore, value, device_index);
}
return skip;
}
bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo2KHR *submit,
const Location &outer_loc) const {
bool skip = false;
for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
const auto &sem_info = submit->pWaitSemaphoreInfos[i];
Location loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
sem_info.stageMask);
skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
skip |= state.ValidateWaitSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
}
for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
const auto &sem_info = submit->pSignalSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
sem_info.stageMask);
skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
skip |= state.ValidateSignalSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
}
return skip;
}
bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(const Location &loc, VkSemaphore semaphore, uint64_t value) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
    const auto semaphore_state = GetSemaphoreState(semaphore);
    if (!semaphore_state || semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) return false;
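    // Absolute difference computed branch-wise to avoid unsigned underflow.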
uint64_t diff = value > semaphore_state->payload ? value - semaphore_state->payload : semaphore_state->payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding current semaphore %s payload", loc.Message().c_str(),
report_data->FormatHandle(semaphore).c_str());
}
for (auto &pair : queueMap) {
const QUEUE_STATE &queue_state = pair.second;
for (const auto &submission : queue_state.submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == semaphore) {
diff = value > signal_semaphore.payload ? value - signal_semaphore.payload : signal_semaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s signal value",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
}
}
}
for (const auto &wait_semaphore : submission.waitSemaphores) {
if (wait_semaphore.semaphore == semaphore) {
diff = value > wait_semaphore.payload ? value - wait_semaphore.payload : wait_semaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s wait value",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
}
}
}
}
}
return skip;
}
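// Accumulates per-submission command buffer state (image layouts, QFO transfer scoreboards, query/event mirrors)
// so each command buffer in a queue submission can be validated against the work submitted before it.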
struct CommandBufferSubmitState {
const CoreChecks *core;
const QUEUE_STATE *queue_state;
QFOTransferCBScoreboards<QFOImageTransferBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<QFOBufferTransferBarrier> qfo_buffer_scoreboards;
vector<VkCommandBuffer> current_cmds;
GlobalImageLayoutMap overlay_image_layout_map;
QueryMap local_query_to_state_map;
EventToStageMap local_event_to_stage_map;
CommandBufferSubmitState(const CoreChecks *c, const char *func, const QUEUE_STATE *q) : core(c), queue_state(q) {}
bool Validate(const core_error::Location &loc, VkCommandBuffer cmd, uint32_t perf_pass) {
bool skip = false;
const auto *cb_node = core->GetCBState(cmd);
if (cb_node == nullptr) {
return skip;
}
skip |= core->ValidateCmdBufImageLayouts(cb_node, core->imageLayoutMap, overlay_image_layout_map);
current_cmds.push_back(cmd);
skip |= core->ValidatePrimaryCommandBufferState(loc, cb_node,
static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), cmd)),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= core->ValidateQueueFamilyIndices(loc, cb_node, queue_state->queue);
for (const auto &descriptor_set : cb_node->validate_descriptorsets_in_queuesubmit) {
const cvdescriptorset::DescriptorSet *set_node = core->GetSetNode(descriptor_set.first);
if (!set_node) {
continue;
}
for (const auto &cmd_info : descriptor_set.second) {
std::string function = loc.StringFunc();
function += ", ";
function += cmd_info.function;
for (const auto &binding_info : cmd_info.binding_infos) {
std::string error;
std::vector<uint32_t> dynamic_offsets;
                    // Dynamic offsets aren't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
                    // This is submit time, not record time...
const bool record_time_validate = false;
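                    // For very large descriptor sets, cache per-view layout results so repeated bindings of the
                    // same image view aren't re-validated.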
layer_data::optional<layer_data::unordered_map<VkImageView, VkImageLayout>> checked_layouts;
if (set_node->GetTotalDescriptorCount() > cvdescriptorset::PrefilterBindRequestMap::kManyDescriptors_) {
checked_layouts.emplace();
}
skip |= core->ValidateDescriptorSetBindingData(
cb_node, set_node, dynamic_offsets, binding_info, cmd_info.framebuffer, cmd_info.attachments.get(),
*cmd_info.subpasses.get(), record_time_validate, function.c_str(),
core->GetDrawDispatchVuid(cmd_info.cmd_type), checked_layouts);
}
}
}
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
for (auto &function : cb_node->queue_submit_functions) {
skip |= function(core, queue_state);
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(core, /*do_validate*/ true, &local_event_to_stage_map);
}
VkQueryPool first_perf_query_pool = VK_NULL_HANDLE;
for (auto &function : cb_node->queryUpdates) {
skip |= function(core, /*do_validate*/ true, first_perf_query_pool, perf_pass, &local_query_to_state_map);
}
return skip;
}
};
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) const {
const auto *fence_state = GetFenceState(fence);
bool skip =
ValidateFenceForSubmit(fence_state, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()");
if (skip) {
return true;
}
const auto queue_state = GetQueueState(queue);
CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit()", queue_state);
SemaphoreSubmitState sem_submit_state(
this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
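        // A chained VkPerformanceQuerySubmitInfoKHR selects which counter pass of a multi-pass performance
        // query this submission executes; queries are validated against that pass index.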
const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx);
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
skip |= cb_submit_state.Validate(loc.dot(Field::pCommandBuffers, i), submit->pCommandBuffers[i], perf_pass);
}
skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext);
if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
"VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
}
}
auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext);
if (protected_submit_info) {
const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE;
// Only check feature once for submit
if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816",
"vkQueueSubmit(): The protectedMemory device feature is disabled, can't submit a protected queue "
"to %s pSubmits[%u]",
report_data->FormatHandle(queue).c_str(), submit_idx);
}
// Make sure command buffers are all protected or unprotected
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]);
if (cb_state != nullptr) {
if ((cb_state->unprotected == true) && (protected_submit == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
"vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_TRUE",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
if ((cb_state->unprotected == false) && (protected_submit == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
"vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_FALSE",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx);
const VkSubmitInfo *submit = &pSubmits[submit_idx];
auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
if (info) {
            // Any mismatch between waitSemaphoreValueCount and waitSemaphoreCount was already flagged before the
            // early return above, so indexing pWaitSemaphoreValues is safe here
if (info->waitSemaphoreValueCount) {
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pWaitSemaphores, i), semaphore,
info->pWaitSemaphoreValues[i]);
}
}
            // Any mismatch between signalSemaphoreValueCount and signalSemaphoreCount was already flagged before the
            // early return above, so indexing pSignalSemaphoreValues is safe here
if (info->signalSemaphoreValueCount) {
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pSignalSemaphores, i), semaphore,
info->pSignalSemaphoreValues[i]);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
VkFence fence) const {
const auto *pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence, "UNASSIGNED-CoreValidation-vkQueueSubmit2KHR-fence-00064",
"UNASSIGNED-vkQueueSubmit2KHR-fence-00063", "vkQueueSubmit2KHR()");
if (skip) {
return true;
}
const auto queue_state = GetQueueState(queue);
CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit2KHR()", queue_state);
SemaphoreSubmitState sem_submit_state(
this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
Location loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);
        const bool protected_submit = (submit->flags & VK_SUBMIT_PROTECTED_BIT_KHR) != 0;
// Only check feature once for submit
        if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
skip |= LogError(queue, "VUID-VkSubmitInfo2KHR-flags-03885",
"vkQueueSubmit2KHR(): The protectedMemory device feature is disabled, can't submit a protected queue "
"to %s pSubmits[%u]",
report_data->FormatHandle(queue).c_str(), submit_idx);
}
for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
auto info_loc = loc.dot(Field::pCommandBufferInfos, i);
info_loc.structure = Struct::VkCommandBufferSubmitInfoKHR;
skip |= cb_submit_state.Validate(info_loc.dot(Field::commandBuffer), submit->pCommandBufferInfos[i].commandBuffer,
perf_pass);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(submit->pCommandBufferInfos[i].deviceMask, queue,
"VUID-VkCommandBufferSubmitInfoKHR-deviceMask-03891");
// Make sure command buffers are all protected or unprotected
const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
if (cb_state != nullptr) {
                if ((cb_state->unprotected == true) && (protected_submit == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03886",
"vkQueueSubmit2KHR(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
"VK_SUBMIT_PROTECTED_BIT_KHR set",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
                if ((cb_state->unprotected == false) && (protected_submit == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03887",
"vkQueueSubmit2KHR(): command buffer %s is protected while queue %s pSubmitInfos[%u] has "
"VK_SUBMIT_PROTECTED_BIT_KHR not set",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
Location outer_loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
// If there are any timeline semaphores, this condition gets checked before the early return above
for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
const auto *sem_info = &submit->pWaitSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
}
// If there are any timeline semaphores, this condition gets checked before the early return above
for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
const auto *sem_info = &submit->pSignalSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
}
}
return skip;
}
#ifdef AHB_VALIDATION_SUPPORT
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT }
};
// AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// ===================== ===================================================
// None VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT
// None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
{ VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
{ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
};
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
{ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
{ VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
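// Illustrative example: an AHardwareBuffer allocated with AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP translates (via ahb_usage_map_a2v and ahb_create_map_a2v above) to
// VkImageUsageFlags (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) plus
// VkImageCreateFlags VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT.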
//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
AHardwareBuffer_Desc ahb_desc;
AHardwareBuffer_describe(buffer, &ahb_desc);
uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
if (0 == (ahb_desc.usage & required_flags)) {
skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
"vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
ahb_desc.usage);
}
return skip;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
// VkExportMemoryAllocateInfo::handleTypes when memory was created.
if (!mem_info->is_export ||
(0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
"export handleTypes (0x%" PRIx32
") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
}
// If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
// with non-NULL image member, then that image must already be bound to memory.
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
const auto image_state = GetImageState(mem_info->dedicated_image);
        // count() requires a DEVICE_MEMORY_STATE* (or a const reference to one), not a const DEVICE_MEMORY_STATE*.
        // This is a const member function, so only a const pointer is available here; cast it away for the lookup.
if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count((DEVICE_MEMORY_STATE *)mem_info)))) {
LogObjectList objlist(device);
objlist.add(pInfo->memory);
objlist.add(mem_info->dedicated_image);
skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
"%s, but that image is not bound to the VkDeviceMemory object.",
report_data->FormatHandle(pInfo->memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str());
}
}
return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
bool skip = false;
auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
// This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
AHardwareBuffer_Desc ahb_desc = {};
AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
// Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB
//
// BLOB & GPU_DATA_BUFFER combo specifically allowed
if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
// Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
// Usage must have at least one bit from the table. It may have additional bits not in the table
uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) {
skip |=
LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
"vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.",
ahb_desc.usage);
}
}
// Collect external buffer info
auto pdebi = LvlInitStruct<VkPhysicalDeviceExternalBufferInfo>();
pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
auto ext_buf_props = LvlInitStruct<VkExternalBufferProperties>();
DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
// If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
// VkExternalImageFormatProperties or VkExternalBufferProperties.
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
// Collect external format info
auto pdeifi = LvlInitStruct<VkPhysicalDeviceExternalImageFormatInfo>();
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
auto pdifi2 = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&pdeifi);
if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely
pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
}
if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
}
auto ext_img_fmt_props = LvlInitStruct<VkExternalImageFormatProperties>();
auto ifp2 = LvlInitStruct<VkImageFormatProperties2>(&ext_img_fmt_props);
VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);
if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
"vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
"structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
}
}
// Retrieve buffer and format properties of the provided AHardwareBuffer
auto ahb_format_props = LvlInitStruct<VkAndroidHardwareBufferFormatPropertiesANDROID>();
auto ahb_props = LvlInitStruct<VkAndroidHardwareBufferPropertiesANDROID>(&ahb_format_props);
DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, allocationSize (%" PRId64
") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
alloc_info->allocationSize, ahb_props.allocationSize);
}
// memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
// Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
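        // e.g. memoryTypeIndex 3 requires bit (1u << 3) == 0x8 to be set in memoryTypeBits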
uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, memoryTypeIndex (%" PRId32
") does not correspond to a bit set in AHardwareBuffer's reported "
"memoryTypeBits bitmask (0x%" PRIx32 ").",
alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
}
// Checks for allocations without a dedicated allocation requirement
if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
// the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
// AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
(0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02384",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
"AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
ahb_desc.format, ahb_desc.usage);
}
} else { // Checks specific to import with a dedicated allocation requirement
const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
// The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02386",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
"dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
ahb_desc.usage);
}
// the format of image must be VK_FORMAT_UNDEFINED or the format returned by
// vkGetAndroidHardwareBufferPropertiesANDROID
if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
}
        // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
(ici->arrayLayers != ahb_desc.layers)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
ahb_desc.layers);
}
// If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
// have either a full mipmap chain or exactly 1 mip level.
//
// NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
// its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
// that the Android hardware buffer contains only a single mip level."
//
// TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
// Clarification requested.
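            // A full mip chain contains floor(log2(max(width, height, depth))) + 1 levels, which is what
            // FullMipChainLevels() computes from the image extent.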
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
(ici->mipLevels != FullMipChainLevels(ici->extent))) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
") is neither 1 nor full mip "
"chain levels (%" PRId32 ").",
ici->mipLevels, FullMipChainLevels(ici->extent));
}
// each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
// corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
// AHardwareBuffer_Desc::usage
if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"dedicated image usage bits (0x%" PRIx64
") include an issue not listed in the AHardwareBuffer Usage Equivalence table.",
ici->usage);
}
std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
for (VkImageUsageFlags ubit : usages) {
if (ici->usage & ubit) {
uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
if (0 == (ahb_usage & ahb_desc.usage)) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ",
string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
}
}
}
}
} else { // Not an import
if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
(0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
(VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
// This is an Android HW Buffer export
if (0 != alloc_info->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
"but allocationSize is non-zero.");
}
} else {
if (0 == alloc_info->allocationSize) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
            }
}
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0
? "VUID-vkGetImageMemoryRequirements-image-04004"
: "VUID-VkImageMemoryRequirementsInfo2-image-01897";
skip |=
LogError(image, vuid,
"%s: Attempt get image memory requirements for an image created with a "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
"bound to memory.",
func_name);
}
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
: "VUID-VkBindBufferMemoryInfo-memory-02986";
LogObjectList objlist(buffer);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkBuffer (%s) "
"VkExternalMemoryBufferreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindImageMemory()") == 0) ? "VUID-vkBindImageMemory-memory-02990"
: "VUID-VkBindImageMemoryInfo-memory-02990";
LogObjectList objlist(image);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) "
"VkExternalMemoryImageCreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
#else // !AHB_VALIDATION_SUPPORT
// Stubs for building on Android without AHB validation support
#ifdef VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
return false;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
return false;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; }
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
return false;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
return false;
}
#endif // AHB_VALIDATION_SUPPORT
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101",
"vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
        }
}
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
    if (chained_flags_struct && (chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT)) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device,
"VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |=
ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714",
"vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. Device only "
"advertises %u memory types.",
pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount);
} else {
const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex];
if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713",
"vkAllocateMemory: attempting to allocate %" PRIu64
" bytes from heap %u,"
"but size of that heap is only %" PRIu64 " bytes.",
pAllocateInfo->allocationSize, memory_type.heapIndex,
phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size);
}
if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) {
skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790",
"vkAllocateMemory: attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872",
"vkAllocateMemory(): attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
}
bool imported_ahb = false;
#ifdef AHB_VALIDATION_SUPPORT
// "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL
// buffer value. Memory imported has another VUID to check size and allocationSize match up
auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext);
if (imported_ahb_info != nullptr) {
imported_ahb = imported_ahb_info->buffer != nullptr;
}
#endif // AHB_VALIDATION_SUPPORT
auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext);
if (dedicated_allocate_info) {
if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) {
skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432",
"vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo");
} else if (dedicated_allocate_info->image != VK_NULL_HANDLE) {
// Dedicated VkImage
const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image);
if (image_state->disjoint == true) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_DISJOINT_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
} else {
if ((pAllocateInfo->allocationSize != image_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-image-02964"
: "VUID-VkMemoryDedicatedAllocateInfo-image-01433";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkImage %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(),
image_state->requirements.size);
}
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
}
}
} else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
// Dedicated VkBuffer
const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer);
if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965"
: "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkBuffer %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(),
buffer_state->requirements.size);
}
if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436",
"vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->buffer).c_str());
}
}
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
// For the given obj node, if it is in use, flag a validation error and return the callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name,
const char *error_code) const {
if (disabled[object_in_use]) return false;
bool skip = false;
if (obj_node->in_use.load()) {
skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
report_data->FormatHandle(obj_struct).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const {
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
bool skip = false;
if (mem_info) {
skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
return skip;
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
bool skip = false;
assert(mem_info);
const auto mem = mem_info->mem;
if (size == 0) {
skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero");
}
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mapped_range.size != 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.",
report_data->FormatHandle(mem).c_str());
}
    // Validate offset is not over allocation size
    if (offset >= mem_info->alloc_info.allocationSize) {
        skip = LogError(mem, "VUID-vkMapMemory-offset-00679",
                        "vkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64
                        " which is larger than the total allocation size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize);
}
// Validate that offset + size is within object's allocationSize
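    // (A size of VK_WHOLE_SIZE maps from offset to the end of the allocation, so only explicit sizes need this check.)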
if (size != VK_WHOLE_SIZE) {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-size-00681",
"VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64
".",
offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
return skip;
}
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) const {
// Verify fence status of submitted fences
bool skip = false;
for (uint32_t i = 0; i < fenceCount; i++) {
skip |= VerifyQueueStateToFence(pFences[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
"VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
const auto &queue_data = queue_family_index_map.find(queueFamilyIndex);
if ((queue_data != queue_family_index_map.end()) && (queue_data->second <= queueIndex)) {
skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, queue_data->second);
}
const auto &queue_flags = queue_family_create_flags_map.find(queueFamilyIndex);
if ((queue_flags != queue_family_create_flags_map.end()) && (queue_flags->second != 0)) {
skip |= LogError(device, "VUID-vkGetDeviceQueue-flags-01841",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") was created with a non-zero VkDeviceQueueCreateFlags. Need to use vkGetDeviceQueue2 instead.",
queueIndex);
}
return skip;
}
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const {
const QUEUE_STATE *queue_state = GetQueueState(queue);
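    // Waiting for the sequence number after all currently queued submissions is equivalent to the queue going idle.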
return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const {
bool skip = false;
const auto &const_queue_map = queueMap;
for (auto &queue : const_queue_map) {
skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size());
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
bool skip = false;
auto *sem_type_create_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext);
if (sem_type_create_info) {
if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE && !enabled_features.core12.timelineSemaphore) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252",
"VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores");
}
if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY && sem_type_create_info->initialValue != 0) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
"vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero");
}
}
return skip;
}
bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphores");
}
bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphoresKHR");
}
bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout,
const char *apiName) const {
bool skip = false;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
auto *semaphore_state = GetSemaphoreState(pWaitInfo->pSemaphores[i]);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256",
"%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName,
report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node) {
if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) const {
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const {
const EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
bool skip = false;
if (event_state) {
skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) const {
if (disabled[query_validation]) return false;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
bool skip = false;
if (qp_state) {
skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
return skip;
}
bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state,
uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const {
bool skip = false;
if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) {
string invalid_flags_string;
for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) {
if (flag & flags) {
if (invalid_flags_string.size()) {
invalid_flags_string += " and ";
}
invalid_flags_string += string_VkQueryResultFlagBits(flag);
}
}
skip |= LogError(query_pool_state->pool,
strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230"
: "VUID-vkCmdCopyQueryPoolResults-queryType-03233",
"%s: QueryPool %s was created with a queryType of"
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), invalid_flags_string.c_str());
}
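    // Performance query results are only complete once the query has been submitted once per required counter
    // pass (n_performance_passes), so count how many passes have results available for each query.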
for (uint32_t query_index = firstQuery; query_index < queryCount; query_index++) {
uint32_t submitted = 0;
for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) {
QueryObject obj(QueryObject(query_pool_state->pool, query_index), pass_index);
auto query_pass_iter = queryToStateMap.find(obj);
if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++;
}
if (submitted < query_pool_state->n_performance_passes) {
skip |= LogError(query_pool_state->pool, "VUID-vkGetQueryPoolResults-queryType-03231",
"%s: QueryPool %s has %u performance query passes, but the query has only been "
"submitted for %u of the passes.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(),
query_pool_state->n_performance_passes, submitted);
}
}
return skip;
}
bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
const char *apiName) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip;
if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 ||
(stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229",
"%s(): QueryPool %s was created with a queryType of "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the "
"size of VkPerformanceCounterResultKHR.",
apiName, report_data->FormatHandle(queryPool).c_str());
}
skip |= ValidatePerformanceQueryResults(apiName, query_pool_state, firstQuery, queryCount, flags);
return skip;
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
bool skip = false;
skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()",
"VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816");
skip |=
ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults");
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |= LogError(
queryPool, "VUID-vkGetQueryPoolResults-queryType-00818",
"%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (!skip) {
uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0;
uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
uint32_t query_items = 0;
uint32_t query_size = 0;
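            // Compute the byte size one query's results occupy for this pool's query type; for most query types an
            // extra integer is added when VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.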
switch (query_pool_state->createInfo.queryType) {
case VK_QUERY_TYPE_OCCLUSION:
// Occlusion queries write one integer value - the number of samples passed.
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
// Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics
// when the pool is created
{
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics);
query_items = static_cast<uint32_t>(pipe_stats_bits.count());
query_size = query_size_in_bytes * (query_items + query_avail_data);
}
break;
case VK_QUERY_TYPE_TIMESTAMP:
// Timestamp queries write one integer
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
// Transform feedback queries write two integers
query_items = 2;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
// Performance queries store results in a tightly packed array of VkPerformanceCounterResultsKHR
query_items = query_pool_state->perf_counter_index_count;
query_size = sizeof(VkPerformanceCounterResultKHR) * query_items;
if (query_size > stride) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519",
"vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64
" which must be at least counterIndexCount (%d) "
"multiplied by sizeof(VkPerformanceCounterResultKHR) (%d).",
report_data->FormatHandle(queryPool).c_str(), stride, query_items,
sizeof(VkPerformanceCounterResultKHR));
}
break;
// These cases intentionally fall through to the default
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
default:
query_size = 0;
break;
}
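            // Results for query i are written starting at i * stride, so the last query's results must still fit
            // within dataSize.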
if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817",
"vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is "
"incompatible with the specified query type and options.",
report_data->FormatHandle(queryPool).c_str(), dataSize);
}
}
}
return skip;
}
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, const char *api_name) const {
bool skip = false;
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-memoryOffset-01046";
} else {
error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046";
}
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621";
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem_info->mem);
objlist.add(typed_handle);
skip = LogError(objlist, error_code,
"In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
memoryOffset, mem_info->alloc_info.allocationSize);
}
return skip;
}
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset,
api_name);
}
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
const char *msgCode) const {
bool skip = false;
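    // The allocation's memoryTypeIndex must correspond to a bit set in the object's required memoryTypeBits.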
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = LogError(mem_info->mem, msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of %s.",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
report_data->FormatHandle(mem_info->mem).c_str());
}
return skip;
}
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
const char *api_name) const {
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0;
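    // The *2 and *2KHR entry points share this helper; api_name selects between the vkBindBufferMemory and
    // VkBindBufferMemoryInfo VUID spellings below.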
bool skip = false;
if (buffer_state) {
// Track objects tied to memory
const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
skip = ValidateSetMemBinding(mem, obj_struct, api_name);
const auto mem_info = GetDevMemState(mem);
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036";
skip |= LogError(buffer, vuid,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate bound memory range information
skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name);
const char *mem_type_vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035";
skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid);
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037";
skip |= LogError(buffer, vuid,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && (mem_info->dedicated_buffer != VK_NULL_HANDLE) &&
((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508";
LogObjectList objlist(buffer);
objlist.add(mem);
objlist.add(mem_info->dedicated_buffer);
skip |= LogError(objlist, vuid,
"%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(mem).c_str(),
report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
report_data->FormatHandle(buffer).c_str(), memoryOffset);
}
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (enabled_features.core12.bufferDeviceAddress &&
(buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) &&
(!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) {
skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339",
"%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, "
"memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.",
api_name);
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one "
"handle from VkBuffer (%s) handleType %s.",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->is_import_ahb == true) {
skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer);
} else if (mem_info->is_import == true) {
if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02985";
} else if ((!bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02985";
} else if ((bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02727";
} else if ((!bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02727";
}
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which "
"is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
}
// Validate mix of protected buffer and memory
if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
} else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image,
VkMemoryRequirements *pMemoryRequirements) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()");
}
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588",
"vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT "
"(need to use vkGetImageMemoryRequirements2).",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name);
}
    const IMAGE_STATE *image_state = GetImageState(pInfo->image);
    if (image_state == nullptr) {
        return skip;
    }
    const VkFormat image_format = image_state->createInfo.format;
const VkImageTiling image_tiling = image_state->createInfo.tiling;
const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext);
if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589",
"%s: %s image was created with a multi-planar format (%s) and "
"VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a "
"VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if ((image_state->disjoint == false) && (image_plane_info != nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590",
"%s: %s image was not created with VK_IMAGE_CREATE_DISJOINT_BIT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str());
}
if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) &&
(image_plane_info != nullptr)) {
const char *vuid = device_extensions.vk_ext_image_drm_format_modifier ? "VUID-VkImageMemoryRequirementsInfo2-image-02280"
: "VUID-VkImageMemoryRequirementsInfo2-image-01591";
skip |= LogError(pInfo->image, vuid,
"%s: %s image is a single-plane format (%s) and does not have tiling of "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if (image_plane_info != nullptr) {
if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) {
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_format);
VkImageAspectFlags aspect = image_plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()");
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()");
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) const {
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
bool skip = false;
if (pipeline_state) {
skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
bool skip = false;
if (sampler_state) {
skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) const {
const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
bool skip = false;
if (desc_pool_state) {
skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
// Verify that the cmdBuffer in the given cb_node is not in use (in-flight), and return the skip result.
// This check is only valid at a point when cmdBuffer is being reset or freed.
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= LogError(cb_node->commandBuffer, error_code, "Attempt to %s %s which is in use.", action,
report_data->FormatHandle(cb_node->commandBuffer).c_str());
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        const auto *cb_state = GetCBState(cmd_buffer);
        if (cb_state) {
            skip |= CheckCommandBufferInFlight(cb_state, action, error_code);
        }
    }
return skip;
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) const {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
const auto *cb_node = GetCBState(pCommandBuffers[i]);
        // Only validate command buffers that still have tracked state; freeing an in-flight command buffer is an error
if (cb_node) {
skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860",
"vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created "
"with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!enabled_features.core.pipelineStatisticsQuery) {
skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
skip |=
LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
"VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
}
auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
if (!perf_ci) {
skip |= LogError(
device, "VUID-VkQueryPoolCreateInfo-queryType-03222",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
"pCreateInfo does not contain in instance of VkQueryPoolPerformanceCreateInfoKHR.");
} else {
const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
if (perf_counter_iter == physical_device_state->perf_counters.end()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
} else {
const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
"counter index.",
idx, perf_ci->pCounterIndices[idx]);
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) const {
const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
    const auto *command_pool_state = GetCommandPoolState(commandPool);
    if (!command_pool_state) return false;
    return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
const auto fence_state = GetFenceState(pFences[i]);
if (fence_state && fence_state->scope == kSyncScopeInternal && fence_state->state == FENCE_INFLIGHT) {
skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s is in use.",
report_data->FormatHandle(pFences[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) const {
const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
bool skip = false;
if (framebuffer_state) {
skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) const {
const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
bool skip = false;
if (rp_state) {
skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
VkFormatProperties format_properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
return format_properties;
}
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec,
const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
for (uint32_t i = 0; i < count; i++) {
auto pvids_ci = (pipe_cis[i].pVertexInputState) ? LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext) : nullptr;
if (nullptr == pvids_ci) continue;
const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
if (vibdd->binding >= device_limits->maxVertexInputBindings) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
i, j, vibdd->binding, device_limits->maxVertexInputBindings);
}
if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
}
if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
"enabled.",
i, j);
}
if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
"enabled.",
i, j, vibdd->divisor);
}
// Find the corresponding binding description and validate input rate setting
bool failed_01871 = true;
for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
(VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
failed_01871 = false;
break;
}
}
if (failed_01871) { // Description not found, or has incorrect inputRate value
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
"VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
i, j, vibdd->binding);
}
}
}
return skip;
}
bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name,
const char *vuid) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
const VkPipelineCreateFlags invalid_flags =
VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT;
if ((flags & invalid_flags) != 0) {
skip |= LogError(device, vuid,
"%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags "
"containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or "
"VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT",
caller_name, index);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) {
skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892",
"vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains "
"VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state_data);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
}
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
}
if (device_extensions.vk_ext_vertex_attribute_divisor) {
skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
}
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
for (uint32_t i = 0; i < count; ++i) {
// Validate depth-stencil state
auto raster_state_ci = pCreateInfos[i].pRasterizationState;
if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci &&
(VK_CULL_MODE_NONE == raster_state_ci->cullMode)) {
auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState;
if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) &&
(depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) {
skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453",
"Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the "
"same for front and back",
i);
}
}
// Validate color attachments
auto color_blend_state = pCreateInfos[i].pColorBlendState;
if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) && color_blend_state) {
const auto attachments = color_blend_state->pAttachments;
            for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount;
                 ++color_attachment_index) {
if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) ||
(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454",
"Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must "
"not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
i, color_attachment_index);
}
if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) ||
(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455",
"Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must "
"not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
i, color_attachment_index);
}
}
}
}
}
return skip;
}
void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *cgpl_state_data) {
ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, result, cgpl_state_data);
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline_state = GetPipelineState(pPipelines[i]);
RecordGraphicsPipelineShaderDynamicState(pipeline_state);
}
}
}
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state_data);
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
for (uint32_t i = 0; i < count; i++) {
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get());
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines",
"VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
} else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416",
"vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV",
"VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count,
pCreateInfos, pAllocator, pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
} else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416",
"vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR",
"VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo,
uint32_t *pExecutableCount,
VkPipelineExecutablePropertiesKHR *pProperties) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= LogError(device, "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270",
"vkGetPipelineExecutablePropertiesKHR called when pipelineExecutableInfo feature is not enabled.");
}
return skip;
}
bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= LogError(device, "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272",
"vkGetPipelineExecutableStatisticsKHR called when pipelineExecutableInfo feature is not enabled.");
}
auto pi = LvlInitStruct<VkPipelineInfoKHR>();
pi.pipeline = pExecutableInfo->pipeline;
// We could probably cache this instead of fetching it every time
uint32_t executable_count = 0;
    DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, nullptr);
if (pExecutableInfo->executableIndex >= executable_count) {
skip |=
LogError(pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275",
"VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with "
"the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR",
pExecutableInfo->executableIndex, executable_count);
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device,
const VkPipelineExecutableInfoKHR *pExecutableInfo,
uint32_t *pStatisticCount,
VkPipelineExecutableStatisticKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state && !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274",
"vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR(
VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state && !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278",
"vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) const {
return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.max_push_descriptors,
IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.core12,
&enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props, &device_extensions);
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name,
uint32_t index = 0) const {
if (disabled[push_constant_range]) return false;
uint32_t const max_push_constants_size = phys_dev_props.limits.maxPushConstantsSize;
bool skip = false;
// Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
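    // e.g. with max_push_constants_size = 128 and offset = 0xFFFFFFFF, a naive (offset + size > max) test could wrap;
    // (offset >= max) rejects that case first, so (max_push_constants_size - offset) below can never underflow.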
if ((offset >= max_push_constants_size) || (size > max_push_constants_size - offset)) {
// This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (offset >= max_push_constants_size) {
skip |= LogError(
device, "VUID-VkPushConstantRange-offset-00294",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, max_push_constants_size);
}
if (size > max_push_constants_size - offset) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00298",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, max_push_constants_size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (offset >= max_push_constants_size) {
skip |= LogError(
device, "VUID-vkCmdPushConstants-offset-00370",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, max_push_constants_size);
}
if (size > max_push_constants_size - offset) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-00371",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, max_push_constants_size);
}
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// size needs to be non-zero and a multiple of 4.
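    // Note: (size & 0x3) != 0 is equivalent to (size % 4) != 0.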
if ((size == 0) || ((size & 0x3) != 0)) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (size == 0) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00296",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00297",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (size == 0) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-arraylength",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-00369",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// offset needs to be a multiple of 4.
if ((offset & 0x3) != 0) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
skip |= LogError(device, "VUID-VkPushConstantRange-offset-00295",
"%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
index, offset);
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
skip |= LogError(device, "VUID-vkCmdPushConstants-offset-00368",
"%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
return skip;
}
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
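    // Example: if the set layouts expose 3 samplers to the vertex stage and 5 to the fragment stage,
    // max_sum[DSL_TYPE_SAMPLERS] ends up as 5, since the per-stage limits apply to each stage independently.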
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (const auto &dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
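// Example: a binding of 4 combined image samplers visible to both the vertex and fragment stages still
// contributes only 4 to sum_by_type[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER].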
std::map<uint32_t, uint32_t> GetDescriptorSum(
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (const auto &dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
// count one block per binding. descriptorCount is number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
}
return sum_by_type;
}
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout) const {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size,
"vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= LogError(device, "VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
}
if (push_descriptor_set_count > 1) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
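    // Passing 'true' skips layouts created with UPDATE_AFTER_BIND_POOL; those are re-checked against the
    // dedicated *UpdateAfterBind* limits further below when VK_EXT_descriptor_indexing is enabled.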
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
    // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
if (device_extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
// Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
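    // VK_EXT_fragment_density_map2 limits how many immutable subsampled samplers a pipeline layout may reference;
    // count them by walking each non-update-after-bind set layout's bindings below.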
if (device_extensions.vk_ext_fragment_density_map_2) {
uint32_t sum_subsampled_samplers = 0;
for (const auto &dsl : set_layouts) {
// find the number of subsampled samplers across all stages
            // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method
if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
(binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) &&
(binding->pImmutableSamplers != nullptr)) {
for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) {
const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]);
if (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT |
VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT)) {
sum_subsampled_samplers++;
}
}
}
}
}
}
if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566",
"vkCreatePipelineLayout(): sum of sampler bindings with flags containing "
"VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or "
"VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages(% d) "
"exceeds device maxDescriptorSetSubsampledSamplers limit (%d).",
sum_subsampled_samplers,
phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers);
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) const {
// Make sure sets being destroyed are not currently in-use
if (disabled[idle_descriptor_set]) return false;
bool skip = false;
const DESCRIPTOR_POOL_STATE *pool = GetDescriptorPoolState(descriptorPool);
if (pool != nullptr) {
for (auto *ds : pool->sets) {
if (ds && ds->in_use.load()) {
skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    // All state checks for AllocateDescriptorSets are done in a single function
return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) const {
    bool skip = false;
    // First make sure the sets being destroyed are not currently in use
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
}
}
const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) const {
    // UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets, so we can't do a
    // single map look-up up-front; the look-ups are done individually in the functions below.
    // Only validate state here; no state updates are performed in this function. A helper function in the
    // cvdescriptorset namespace parses the params and makes calls into the specific class instances.
return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
"vkUpdateDescriptorSets()");
}
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if (cb_state->in_use.load()) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
"command buffer fence before this call.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Primary Command Buffer
const VkCommandBufferUsageFlags invalid_usage =
(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
if ((pBeginInfo->flags & invalid_usage) == invalid_usage) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840",
"vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(commandBuffer).c_str());
}
} else {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo;
if (!info) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
report_data->FormatHandle(commandBuffer).c_str());
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(info->renderPass);
const auto *framebuffer = GetFramebufferState(info->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != info->renderPass) {
const auto *render_pass = GetRenderPassState(info->renderPass);
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
render_pass, "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
}
}
if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
"occulusionQuery is disabled or the device does not support precise occlusion queries.",
report_data->FormatHandle(commandBuffer).c_str());
}
auto p_inherited_viewport_scissor_info =
LvlFindInChain<VkCommandBufferInheritanceViewportScissorInfoNV>(info->pNext);
if (p_inherited_viewport_scissor_info != nullptr && p_inherited_viewport_scissor_info->viewportScissor2D) {
                if (!enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04782",
"vkBeginCommandBuffer(): inheritedViewportScissor2D feature not enabled.");
}
if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04786",
"vkBeginCommandBuffer(): Secondary %s must be recorded with the"
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT if viewportScissor2D is VK_TRUE.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (p_inherited_viewport_scissor_info->viewportDepthCount == 0) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04784",
"vkBeginCommandBuffer(): "
"If viewportScissor2D is VK_TRUE, then viewportDepthCount must be greater than 0.",
report_data->FormatHandle(commandBuffer).c_str());
}
}
}
if (info && info->renderPass != VK_NULL_HANDLE) {
const auto *render_pass = GetRenderPassState(info->renderPass);
if (render_pass) {
if (info->subpass >= render_pass->createInfo.subpassCount) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
"less than the number of subpasses (%d).",
report_data->FormatHandle(commandBuffer).c_str(), info->subpass,
render_pass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_state->state) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
"vkEndCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
} else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
const auto *pool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmd_pool);
skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
"%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
}
}
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
}
return skip;
}
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
!(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
        // This needs spec clarification to update valid usage, see comments in issue:
        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
if (cb_state->state == CB_INVALID_COMPLETE || cb_state->state == CB_INVALID_INCOMPLETE) {
skip |= ReportInvalidCommandBuffer(cb_state, "vkEndCommandBuffer()");
} else if (CB_RECORDING != cb_state->state) {
skip |= LogError(
commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00059",
"vkEndCommandBuffer(): Cannot call End on %s when not in the RECORDING state. Must first call vkBeginCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
}
for (const auto &query : cb_state->activeQueries) {
skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061",
"vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.",
report_data->FormatHandle(query.pool).c_str(), query.query);
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
const auto *pool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmd_pool);
skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046",
"vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the "
"VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
}
skip |= CheckCommandBufferInFlight(cb_state, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS:
return "graphics";
case VK_PIPELINE_BIND_POINT_COMPUTE:
return "compute";
case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV:
return "ray-tracing";
default:
return "unknown";
}
}
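// Checks applied only when binding a graphics pipeline: blend support of the currently bound framebuffer's color
// attachments, and dynamic-state requirements when viewport/scissor state is inherited from the primary.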
bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const {
bool skip = false;
const FRAMEBUFFER_STATE *fb_state = cb_state->activeFramebuffer.get();
if (fb_state) {
auto subpass_desc = &pipeline_state->rp_state->createInfo.pSubpasses[pipeline_state->graphicsPipelineCI.subpass];
for (size_t i = 0; i < pipeline_state->attachments.size() && i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
const auto *imageview_state = GetActiveAttachmentImageViewState(cb_state, attachment);
if (!imageview_state) continue;
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (!image_state) continue;
const VkFormat format = pipeline_state->rp_state->createInfo.pAttachments[attachment].format;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(format);
if (pipeline_state->graphicsPipelineCI.pRasterizationState &&
!pipeline_state->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable &&
pipeline_state->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-04717",
"vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].blendEnable is VK_TRUE but format %s associated with this attached image (%s) does "
"not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.",
                                 i, string_VkFormat(format), report_data->FormatHandle(image_state->image).c_str());
}
}
}
if (cb_state->inheritedViewportDepths.size() != 0) {
bool dyn_viewport = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)
|| IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT);
bool dyn_scissor = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)
|| IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR);
if (!dyn_viewport || !dyn_scissor) {
skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04808",
"Graphics pipeline incompatible with viewport/scissor inheritance.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
const auto *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
const auto &pipeline_state_bind_point = pipeline_state->getPipelineType();
if (pipelineBindPoint != pipeline_state_bind_point) {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
"Cannot bind a pipeline of type %s to the graphics pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
"Cannot bind a pipeline of type %s to the compute pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
"Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
}
} else {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state);
}
}
return skip;
}
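// Helper for the vkCmdSet* commands that are disallowed while viewport/scissor inheritance
// (VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D) is active.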
bool CoreChecks::ForbidInheritedViewportScissor(VkCommandBuffer commandBuffer, const CMD_BUFFER_STATE *cb_state,
const char* vuid, const char *cmdName) const {
bool skip = false;
if (cb_state->inheritedViewportDepths.size() != 0) {
skip |= LogError(
commandBuffer, vuid,
"%s: commandBuffer must not have VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled.", cmdName);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
skip |=
ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewport-commandBuffer-04821", "vkCmdSetViewport");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
skip |=
ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissor-viewportScissor2D-04789", "vkCmdSetScissor");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
if (!enabled_features.exclusive_scissor.exclusiveScissor) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031",
"vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
}
return skip;
}
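// vkCmdBindShadingRateImageNV: a non-null imageView must be a 2D or 2D_ARRAY view with format VK_FORMAT_R8_UINT,
// backed by an image created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV in the shading-rate-optimal layout.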
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058",
"vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
}
if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
"VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
skip |= LogError(
imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
}
const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
"created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
}
if (view_state) {
const auto image_state = GetImageState(view_state->create_info.image);
bool hit_error = false;
// XXX TODO: While the VUID says "each subresource", only the base mip level is
// actually used. Since we don't have an existing convenience function to iterate
// over all mip levels, just don't bother with non-base levels.
const VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
if (image_state) {
                skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV,
                                          "vkCmdBindShadingRateImageNV()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
                                          "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
"vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
}
for (uint32_t i = 0; i < viewportCount; ++i) {
auto *palette = &pShadingRatePalettes[i];
if (palette->shadingRatePaletteEntryCount == 0 ||
palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
skip |= LogError(
commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
"vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
}
}
return skip;
}
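// The vertex/index/transform offsets of a VkGeometryTrianglesNV must each fall inside the corresponding buffer.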
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) {
skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const {
bool skip = false;
if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) {
skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name);
} else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) {
skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) {
skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device,
const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureKHR *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo) {
const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)) {
skip |=
LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614",
"VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing "
"VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR.");
}
if (buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) {
skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615",
"VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with "
"VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT.");
}
if (pCreateInfo->offset + pCreateInfo->size > buffer_state->createInfo.size) {
skip |= LogError(
device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616",
"VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than the size of buffer.");
}
}
}
return skip;
}
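// Validate a single VkBindAccelerationStructureMemoryInfoNV: the structure must not already be bound, and the
// memory offset/size/type must satisfy what vkGetAccelerationStructureMemoryRequirementsNV reported.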
bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device,
const VkBindAccelerationStructureMemoryInfoNV &info) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure);
if (!as_state) {
return skip;
}
if (!as_state->GetBoundMemory().empty()) {
skip |=
LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620",
"vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
"vkBindAccelerationStructureMemoryNV()");
skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
"vkBindAccelerationStructureMemoryNV()",
"VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622");
}
// Validate memory requirements alignment
if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623",
"vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64
" must be an integer multiple of the alignment 0x%" PRIxLEAST64
" member of the VkMemoryRequirements structure returned from "
"a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV",
info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624",
"vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64
" member of the VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size "
"of memory minus memoryOffset 0x%" PRIxLEAST64 ".",
as_state->memory_requirements.memoryRequirements.size,
mem_info->alloc_info.allocationSize - info.memoryOffset);
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
size_t dataSize, void *pData) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
if (as_state != nullptr) {
// TODO: update the fake VUID below once the real one is generated.
skip = ValidateMemoryIsBoundToAccelerationStructure(
as_state, "vkGetAccelerationStructureHandleNV",
"UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
return skip;
}
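// For update-mode builds, each KHR build info must stay consistent (geometryCount, flags, type) with how
// srcAccelerationStructure was last built, and the destination's create-time type must match the build type.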
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR(
VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESKHR, "vkCmdBuildAccelerationStructuresKHR()");
if (pInfos != NULL) {
for (uint32_t info_index = 0; info_index < infoCount; ++info_index) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state =
GetAccelerationStructureStateKHR(pInfos[info_index].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state =
GetAccelerationStructureStateKHR(pInfos[info_index].dstAccelerationStructure);
if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
if (!src_as_state || (src_as_state && src_as_state->acceleration_structure == VK_NULL_HANDLE)) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03666",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be "
"VK_NULL_HANDLE.");
}
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must "
"have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
                if (src_as_state && pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
                if (src_as_state && pInfos[info_index].flags != src_as_state->build_info_khr.flags) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
                if (src_as_state && pInfos[info_index].type != src_as_state->build_info_khr.type) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR(
VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
bool skip = false;
for (uint32_t i = 0; i < infoCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
if (!src_as_state || (src_as_state && !src_as_state->acceleration_structure)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03666",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be "
"VK_NULL_HANDLE.");
}
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
"been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
            if (src_as_state && pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
            if (src_as_state && pInfos[i].flags != src_as_state->build_info_khr.flags) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
            if (src_as_state && pInfos[i].type != src_as_state->build_info_khr.type) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03699",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
VkDeviceSize instanceOffset, VkBool32 update,
VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
VkBuffer scratch, VkDeviceSize scratchOffset) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()");
if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():");
}
}
if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
"vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
pInfo->geometryCount);
}
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);
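    // The counts fixed at acceleration structure creation time (instanceCount, geometryCount, per-geometry
    // vertex/index/AABB counts) are upper bounds that the build info must not exceed.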
if (dst_as_state != nullptr && pInfo != nullptr) {
if (dst_as_state->create_infoNV.info.type != pInfo->type) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
"[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type),
string_VkAccelerationStructureTypeNV(pInfo->type));
}
if (dst_as_state->create_infoNV.info.flags != pInfo->flags) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
"[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
dst_as_state->create_infoNV.info.flags, pInfo->flags);
}
if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount);
}
if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount);
} else {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry;
const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
break;
}
if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
break;
}
if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
break;
}
}
}
}
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
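    // An update build (update == VK_TRUE) re-fits an existing structure: src must have been built with
    // ALLOW_UPDATE, and the scratch buffer is sized against the UPDATE_SCRATCH requirements rather than
    // the BUILD_SCRATCH requirements used by the else-branch below.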
if (update == VK_TRUE) {
if (src == VK_NULL_HANDLE) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
"with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
"VkAccelerationStructureInfoNV::flags.");
}
}
if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
skip |=
LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
"has not been called for update scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
} else {
if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
"vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
"vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
}
if (instanceData != VK_NULL_HANDLE) {
const auto buffer_state = GetBufferState(instanceData);
if (buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-instanceData-02782",
"vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
}
if (scratch_buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()",
"VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()");
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
if (dst_as_state != nullptr) {
        skip |= ValidateMemoryIsBoundToAccelerationStructure(
            dst_as_state, "vkCmdCopyAccelerationStructureNV()",
            "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
if (src_as_state != nullptr &&
(!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411",
"vkCmdCopyAccelerationStructureNV(): src must have been built with "
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
"VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
}
}
if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410",
"vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"
"or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV",
"VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureKHR);
bool skip = false;
    if (as_state) {
        skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureKHR",
                                       "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
        if (pAllocator && !as_state->allocator) {
            skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444",
                             "vkDestroyAccelerationStructureKHR(): If no VkAllocationCallbacks were provided when "
                             "accelerationStructure was created, pAllocator must be NULL.");
        }
    }
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkViewportWScalingNV *pViewportWScalings) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
uint16_t lineStipplePattern) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
    // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs
if (!device_extensions.vk_ext_depth_range_unrestricted) {
if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
// Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508",
"vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds "
"(=%f) is not within the [0.0, 1.0] range.",
minDepthBounds);
}
if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
// Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509",
"vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds "
"(=%f) is not within the [0.0, 1.0] range.",
maxDepthBounds);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
return skip;
}
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
const auto *pipeline_layout = GetPipelineLayout(layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
if (descriptor_set) {
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
"descriptorSetLayout at index %u of "
"%s due to: %s.",
set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
// Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
skip |=
LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u "
"dynamicOffsets are left in "
"pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                        // Set the number found to the maximum to prevent duplicate messages and to keep subsequent
                        // descriptor sets from testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount;
} else { // Validate dynamic offsets and Dynamic Offset Minimums
// offset for all sets (pDynamicOffsets)
uint32_t cur_dyn_offset = total_dynamic_descriptors;
// offset into this descriptor set
uint32_t set_dyn_offset = 0;
const auto &dsl = descriptor_set->GetLayout();
const auto binding_count = dsl->GetBindingCount();
const auto &limits = phys_dev_props.limits;
for (uint32_t i = 0; i < binding_count; i++) {
const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(i);
// skip checking binding if not needed
if (cvdescriptorset::IsDynamicDescriptor(binding->descriptorType) == false) {
continue;
}
                        // Bindings can be sparse: if a descriptor set has only bindings 0 and 2, the binding_index values will be 0 and 2
const uint32_t binding_index = binding->binding;
const uint32_t descriptorCount = binding->descriptorCount;
// Need to loop through each descriptor count inside the binding
// if descriptorCount is zero the binding with a dynamic descriptor type does not count
for (uint32_t j = 0; j < descriptorCount; j++) {
const uint32_t offset = pDynamicOffsets[cur_dyn_offset];
if (offset == 0) {
// offset of zero is equivalent of not having the dynamic offset
cur_dyn_offset++;
set_dyn_offset++;
continue;
}
// Validate alignment with limit
if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) &&
(SafeModulo(offset, limits.minUniformBufferOffsetAlignment) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of "
"device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, offset, limits.minUniformBufferOffsetAlignment);
}
if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
(SafeModulo(offset, limits.minStorageBufferOffsetAlignment) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of "
"device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, offset, limits.minStorageBufferOffsetAlignment);
}
auto *descriptor = descriptor_set->GetDescriptorFromDynamicOffsetIndex(set_dyn_offset);
assert(descriptor != nullptr);
// Currently only GeneralBuffer are dynamic and need to be checked
if (descriptor->GetClass() == cvdescriptorset::DescriptorClass::GeneralBuffer) {
const auto *buffer_descriptor = static_cast<const cvdescriptorset::BufferDescriptor *>(descriptor);
const VkDeviceSize bound_range = buffer_descriptor->GetRange();
const VkDeviceSize bound_offset = buffer_descriptor->GetOffset();
const BUFFER_STATE *buffer_state = buffer_descriptor->GetBufferState();
assert(buffer_state != nullptr);
// Validate offset didn't go over buffer
if ((bound_range == VK_WHOLE_SIZE) && (offset > 0)) {
LogObjectList objlist(commandBuffer);
objlist.add(pDescriptorSets[set_idx]);
objlist.add(buffer_state->buffer);
skip |=
LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x, but must be zero since "
"the buffer descriptor's range is VK_WHOLE_SIZE in descriptorSet #%u binding #%u "
"descriptor[%u].",
cur_dyn_offset, offset, set_idx, binding_index, j);
} else if ((bound_range != VK_WHOLE_SIZE) &&
((offset + bound_range + bound_offset) > buffer_state->createInfo.size)) {
LogObjectList objlist(commandBuffer);
objlist.add(pDescriptorSets[set_idx]);
objlist.add(buffer_state->buffer);
skip |=
LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x which when added to the "
"buffer descriptor's range (0x%" PRIxLEAST64
") is greater then the size of the buffer (0x%" PRIxLEAST64
") in descriptorSet #%u binding #%u descriptor[%u].",
cur_dyn_offset, offset, bound_range, buffer_state->createInfo.size, set_idx,
binding_index, j);
}
}
cur_dyn_offset++;
set_dyn_offset++;
} // descriptorCount loop
} // bindingCount loop
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
} else {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter",
"vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!",
report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but "
"dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
    // The sum of firstSet and descriptorSetCount must be less than or equal to setLayoutCount
if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-firstSet-00360",
"vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than "
"VkPipelineLayoutCreateInfo::setLayoutCount "
"(%zu) when pipeline layout was created",
firstSet, setCount, pipeline_layout->set_layouts.size());
}
return skip;
}
// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes a map of error codes as some of the VUIDs (e.g. vkCmdBindPipeline) are written per bind point
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
bool skip = false;
auto pool = cb_state->command_pool.get();
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(cb_state->createInfo.commandPool);
skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *func_name = "vkCmdPushDescriptorSetKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
const auto layout_data = GetPipelineLayout(layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
if (set < set_layouts.size()) {
const auto &dsl = set_layouts[set];
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
set, report_data->FormatHandle(layout).c_str());
} else {
// Create an empty proxy in order to use the existing descriptor set update validation
// TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
// don't have to do this.
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
}
}
} else {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) const {
const auto buffer_state = GetBufferState(buffer);
const auto cb_node = GetCBState(commandBuffer);
assert(buffer_state);
assert(cb_node);
bool skip =
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
"vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
const auto offset_align = GetIndexAlignment(indexType);
if (offset % offset_align) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
if (offset >= buffer_state->requirements.size) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s).",
offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()",
"VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
}
return skip;
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
const std::string &msgCode) const {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip = LogError(image_state->image, msgCode, "%s for %s was created with a sample count of %s but must be %s.", location,
report_data->FormatHandle(image_state->image).c_str(),
string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
const auto dst_buffer_state = GetBufferState(dstBuffer);
assert(dst_buffer_state);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034",
"vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skip |=
ValidateProtectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813");
skip |=
ValidateUnprotectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
Location loc(Func::vkCmdSetEvent, Field::stageMask);
LogObjectList objects(commandBuffer);
skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
const VkDependencyInfoKHR *pDependencyInfo) const {
const char *func = "vkCmdSetEvent2KHR()";
LogObjectList objects(commandBuffer);
objects.add(event);
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETEVENT, func);
Location loc(Func::vkCmdSetEvent2KHR, Field::pDependencyInfo);
if (pDependencyInfo->dependencyFlags != 0) {
skip |= LogError(objects, "VUID-vkCmdSetEvent2KHR-dependencyFlags-03825", "%s (%s) must be 0",
loc.dot(Field::dependencyFlags).Message().c_str(),
string_VkDependencyFlags(pDependencyInfo->dependencyFlags).c_str());
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, kGeneral, pDependencyInfo);
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdResetEvent, Field::stageMask);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
VkPipelineStageFlags2KHR stageMask) const {
const char *func = "vkCmdResetEvent2KHR()";
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdResetEvent2KHR, Field::stageMask);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_RESETEVENT, func);
skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
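// Returns true when the mask contains any stage outside the framebuffer-space stages
// (fragment shader, early/late fragment tests, and color attachment output).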
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) {
return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}
// transient helper struct for checking parts of VUID 02285
struct RenderPassDepState {
using Location = core_error::Location;
using Func = core_error::Func;
using Struct = core_error::Struct;
using Field = core_error::Field;
const CoreChecks *core;
const std::string func_name;
const std::string vuid;
uint32_t active_subpass;
const VkRenderPass rp_handle;
const VkPipelineStageFlags2KHR disabled_features;
const std::vector<uint32_t> &self_dependencies;
const safe_VkSubpassDependency2 *dependencies;
RenderPassDepState(const CoreChecks *c, const std::string &f, const std::string &v, uint32_t subpass, const VkRenderPass handle,
const DeviceFeatures &features, const std::vector<uint32_t> &self_deps,
const safe_VkSubpassDependency2 *deps)
: core(c),
func_name(f),
vuid(v),
active_subpass(subpass),
rp_handle(handle),
disabled_features(sync_utils::DisabledPipelineStages(features)),
self_dependencies(self_deps),
dependencies(deps) {}
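    // Prefer the synchronization2 barrier chained on the dependency's pNext; otherwise
    // synthesize an equivalent VkMemoryBarrier2KHR from the legacy sync1 mask fields.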
VkMemoryBarrier2KHR GetSubPassDepBarrier(const safe_VkSubpassDependency2 &dep) {
VkMemoryBarrier2KHR result;
const auto *barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dep.pNext);
if (barrier) {
result = *barrier;
} else {
result.srcStageMask = dep.srcStageMask;
result.dstStageMask = dep.dstStageMask;
result.srcAccessMask = dep.srcAccessMask;
result.dstAccessMask = dep.dstAccessMask;
}
return result;
}
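    // A pipeline barrier recorded inside a render pass must match one of the active
    // subpass's self-dependencies: after feature-based expansion, the barrier's stage
    // masks must be subsets of the dependency's stage masks, where
    // VK_PIPELINE_STAGE_ALL_COMMANDS_BIT on the dependency side matches anything.
    // Returns true (i.e. skip) when no self-dependency matches.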
bool ValidateStage(const Location &loc, VkPipelineStageFlags2KHR src_stage_mask, VkPipelineStageFlags2KHR dst_stage_mask) {
// Look for matching mask in any self-dependency
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
auto sub_src_stage_mask =
sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_features);
auto sub_dst_stage_mask =
sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, sync_utils::kAllQueueTypes, disabled_features);
match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency srcStageMask "
                           "for any self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
                           "Candidate VkSubpassDependency are pDependencies entries [%s].",
                           loc.dot(Field::srcStageMask).Message().c_str(), src_stage_mask, active_subpass,
                           core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency dstStageMask "
                           "for any self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
                           "Candidate VkSubpassDependency are pDependencies entries [%s].",
                           loc.dot(Field::dstStageMask).Message().c_str(), dst_stage_mask, active_subpass,
                           core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
}
return !match;
}
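    // Access masks are checked the same way: both srcAccessMask and dstAccessMask of the
    // barrier must be subsets of a single self-dependency's access masks.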
bool ValidateAccess(const Location &loc, VkAccessFlags2KHR src_access_mask, VkAccessFlags2KHR dst_access_mask) {
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
match = (src_access_mask == (sub_dep.srcAccessMask & src_access_mask)) &&
(dst_access_mask == (sub_dep.dstAccessMask & dst_access_mask));
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::srcAccessMask).Message().c_str(), src_access_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::dstAccessMask).Message().c_str(), dst_access_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
}
return !match;
}
bool ValidateDependencyFlag(VkDependencyFlags dependency_flags) {
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
match = sub_dep.dependencyFlags == dependency_flags;
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
core->LogError(rp_handle, vuid,
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
func_name.c_str(), dependency_flags, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
return !match;
}
};
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers,
uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
const auto& rp_state = cb_state->activeRenderPass;
RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier-pDependencies-02285",
cb_state->activeSubpass, rp_state->renderPass, enabled_features,
rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies);
if (state.self_dependencies.size() == 0) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s Barriers cannot be set during subpass %d of %s with no self-dependency specified.",
outer_loc.Message().c_str(), state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str());
return skip;
}
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass];
skip |= state.ValidateStage(outer_loc, src_stage_mask, dst_stage_mask);
if (0 != buffer_mem_barrier_count) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(),
buffer_mem_barrier_count, state.active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mem_barrier = mem_barriers[i];
Location loc(outer_loc.function, Struct::VkMemoryBarrier, Field::pMemoryBarriers, i);
skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask);
}
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask);
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex,
img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc,
state.rp_handle, img_barrier);
}
}
skip |= state.ValidateDependencyFlag(dependency_flags);
return skip;
}
bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
const VkDependencyInfoKHR *dep_info) const {
bool skip = false;
const auto& rp_state = cb_state->activeRenderPass;
RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier2KHR-pDependencies-02285",
cb_state->activeSubpass, rp_state->renderPass, enabled_features,
rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies);
if (state.self_dependencies.size() == 0) {
skip |= LogError(state.rp_handle, state.vuid,
"%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.",
state.func_name.c_str(), state.active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
return skip;
}
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass];
for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pMemoryBarriers[i];
Location loc(outer_loc.function, Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i);
skip |= state.ValidateStage(loc, mem_barrier.srcStageMask, mem_barrier.dstStageMask);
skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask);
}
if (0 != dep_info->bufferMemoryBarrierCount) {
skip |=
LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(),
dep_info->bufferMemoryBarrierCount, state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str());
}
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) {
const auto &img_barrier = dep_info->pImageMemoryBarriers[i];
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
skip |= state.ValidateStage(loc, img_barrier.srcStageMask, img_barrier.dstStageMask);
skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask);
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-srcQueueFamilyIndex-01182",
"%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex,
img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc,
state.rp_handle, img_barrier);
}
}
skip |= state.ValidateDependencyFlag(dep_info->dependencyFlags);
return skip;
}
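// Verify that every stage in stage_mask is supported by the queue family that the command
// buffer was allocated from. Meta stages (ALL_GRAPHICS, ALL_TRANSFER, ...) are checked
// against the table below; the remaining bits are expanded and tested individually.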
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const LogObjectList &objects, const Location &loc,
VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
// these are always allowed.
stage_mask &= ~(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR |
VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_HOST_BIT_KHR);
if (stage_mask == 0) {
return skip;
}
static const std::map<VkPipelineStageFlags2KHR, VkQueueFlags> metaFlags{
{VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
};
for (const auto &entry : metaFlags) {
if (((entry.first & stage_mask) != 0) && ((entry.second & queue_flags) == 0)) {
const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, entry.first);
skip |= LogError(objects, vuid,
"%s flag %s is not compatible with the queue family properties (%s) of this command buffer.",
loc.Message().c_str(), sync_utils::StringPipelineStageFlags(entry.first).c_str(),
string_VkQueueFlags(queue_flags).c_str());
}
stage_mask &= ~entry.first;
}
if (stage_mask == 0) {
return skip;
}
auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, queue_flags);
auto bad_flags = stage_mask & ~supported_flags;
// Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
for (size_t i = 0; i < sizeof(bad_flags) * 8; i++) {
VkPipelineStageFlags2KHR bit = (1ULL << i) & bad_flags;
if (bit) {
const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, bit);
skip |= LogError(
objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.",
loc.Message().c_str(), sync_utils::StringPipelineStageFlags(bit).c_str(), string_VkQueueFlags(queue_flags).c_str());
}
}
return skip;
}
bool CoreChecks::ValidatePipelineStageFeatureEnables(const LogObjectList &objects, const Location &loc,
VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2 && stage_mask == 0) {
const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, 0);
std::stringstream msg;
msg << loc.Message() << " must not be 0 unless synchronization2 is enabled.";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
auto disabled_stages = sync_utils::DisabledPipelineStages(enabled_features);
auto bad_bits = stage_mask & disabled_stages;
if (bad_bits == 0) {
return skip;
}
for (size_t i = 0; i < sizeof(bad_bits) * 8; i++) {
VkPipelineStageFlags2KHR bit = 1ULL << i;
if (bit & bad_bits) {
const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, bit);
std::stringstream msg;
msg << loc.Message() << " includes " << sync_utils::StringPipelineStageFlags(bit) << " when the device does not have "
<< sync_vuid_maps::kFeatureNameMap.at(bit) << " feature enabled.";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
}
return skip;
}
bool CoreChecks::ValidatePipelineStage(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
skip |= ValidateStageMasksAgainstQueueCapabilities(objects, loc, queue_flags, stage_mask);
skip |= ValidatePipelineStageFeatureEnables(objects, loc, stage_mask);
return skip;
}
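// Verify that each access flag is allowed by at least one stage in stage_mask. The stage
// mask is expanded first (meta stages to individual stages), the union of access flags
// compatible with those stages is computed, and any leftover access bit is an error.
// ALL_COMMANDS stage masks and the generic MEMORY_READ/WRITE accesses always pass.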
bool CoreChecks::ValidateAccessMask(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
VkAccessFlags2KHR access_mask, VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
// Early out if all commands set
if ((stage_mask & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) != 0) return skip;
// or if only generic memory accesses are specified (or we got a 0 mask)
access_mask &= ~(VK_ACCESS_2_MEMORY_READ_BIT_KHR | VK_ACCESS_2_MEMORY_WRITE_BIT_KHR);
if (access_mask == 0) return skip;
    auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags);
auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages);
auto bad_accesses = (access_mask & ~valid_accesses);
if (bad_accesses == 0) {
return skip;
}
for (size_t i = 0; i < sizeof(bad_accesses) * 8; i++) {
VkAccessFlags2KHR bit = (1ULL << i);
if (bad_accesses & bit) {
const auto& vuid = sync_vuid_maps::GetBadAccessFlagsVUID(loc, bit);
std::stringstream msg;
msg << loc.Message() << " bit " << sync_utils::StringAccessFlags(bit) << " is not supported by stage mask ("
<< sync_utils::StringPipelineStageFlags(stage_mask) << ").";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
}
return skip;
}
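// Deferred (submit-time) check for vkCmdWaitEvents: srcStageMask must equal the union of
// the stageMask values the waited-on events were set with, optionally OR'ed with
// VK_PIPELINE_STAGE_HOST_BIT to account for vkSetEvent from the host. Stage masks recorded
// in this command buffer are read from localEventToStageMap, with a fallback to the last
// known global event state for events set elsewhere.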
bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
size_t firstEventIndex, VkPipelineStageFlags2KHR sourceStageMask,
EventToStageMap *localEventToStageMap) {
bool skip = false;
VkPipelineStageFlags2KHR stage_mask = 0;
const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
auto event = pCB->events[event_index];
auto event_data = localEventToStageMap->find(event);
if (event_data != localEventToStageMap->end()) {
stage_mask |= event_data->second;
} else {
auto global_event_data = state_data->GetEventState(event);
if (!global_event_data) {
skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent,
"%s cannot be waited on if it has never been set.",
state_data->report_data->FormatHandle(event).c_str());
} else {
stage_mask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= state_data->LogError(
pCB->commandBuffer, "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%" PRIx64
" which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%" PRIx64 ".",
sourceStageMask, stage_mask);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto queue_flags = GetQueueFlags(*cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdWaitEvents);
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask);
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask);
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
skip |=
ValidateBarriers(loc.dot(Field::pDependencyInfo), cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
for (uint32_t i = 0; (i < eventCount) && !skip; i++) {
LogObjectList objects(commandBuffer);
objects.add(pEvents[i]);
Location loc(Func::vkCmdWaitEvents2KHR, Field::pDependencyInfos, i);
if (pDependencyInfos[i].dependencyFlags != 0) {
skip |= LogError(objects, "VUID-vkCmdWaitEvents2KHR-dependencyFlags-03844", "%s (%s) must be 0.",
loc.dot(Field::dependencyFlags).Message().c_str(),
string_VkDependencyFlags(pDependencyInfos[i].dependencyFlags).c_str());
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, kGeneral, &pDependencyInfos[i]);
}
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
return skip;
}
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker recording step below will append to the events vector.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
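    // Stage-mask validation is deferred to submit time via this lambda, since the final
    // set/reset state of each event is only known once the command buffer is submitted.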
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, sourceStageMask](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PreCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker recording step below will append to the events vector.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
for (uint32_t i = 0; i < eventCount; i++) {
const auto &dep_info = pDependencyInfos[i];
auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, stage_masks](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, stage_masks.src,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, dep_info.imageMemoryBarrierCount, dep_info.pImageMemoryBarriers);
}
}
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdWaitEvents, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
void CoreChecks::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
for (uint32_t i = 0; i < eventCount; i++) {
const auto &dep_info = pDependencyInfos[i];
RecordBarriers(Func::vkCmdWaitEvents2KHR, cb_state, dep_info);
}
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
auto queue_flags = GetQueueFlags(*cb_state);
Location loc(Func::vkCmdPipelineBarrier);
auto op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
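    // For barriers that are purely one side of a queue family ownership transfer, only the
    // stage mask meaningful on this queue is validated: srcStageMask for releases,
    // dstStageMask for acquires, and both for general barriers.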
if (op_type == kAllRelease || op_type == kGeneral) {
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask);
}
if (op_type == kAllAcquire || op_type == kGeneral) {
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask);
}
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
} else {
if (dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
skip = LogError(objects, "VUID-vkCmdPipelineBarrier-dependencyFlags-01186",
"%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
loc.dot(Field::dependencyFlags).Message().c_str());
}
}
skip |= ValidateBarriers(loc, cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
const VkDependencyInfoKHR *pDependencyInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
auto op_type =
ComputeBarrierOperationsType(cb_state, pDependencyInfo->bufferMemoryBarrierCount, pDependencyInfo->pBufferMemoryBarriers,
pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers);
Location loc(Func::vkCmdPipelineBarrier2KHR, Field::pDependencyInfo);
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, pDependencyInfo);
if (skip) return true; // Early return to avoid redundant errors from below calls
} else {
if (pDependencyInfo->dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
skip = LogError(objects, "VUID-vkCmdPipelineBarrier2KHR-dependencyFlags-01186",
"%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
loc.dot(Field::dependencyFlags).Message().c_str());
}
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, op_type, pDependencyInfo);
return skip;
}
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdPipelineBarrier, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdPipelineBarrier2KHR, cb_state, *pDependencyInfo);
TransitionImageLayouts(cb_state, pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers);
}
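// Shared validation for vkCmdBeginQuery and vkCmdBeginQueryIndexedEXT. The two entry
// points differ in their VUIDs, which the caller supplies through the vuids struct, and in
// the query index used for the duplicate-active-query check.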
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags,
uint32_t index, CMD_TYPE cmd,
const char *cmd_name, const ValidateBeginQueryVuids *vuids) const {
bool skip = false;
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQuery-queryType-02804",
"%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
}
// Check for nested queries
if (cb_state->activeQueries.size()) {
for (const auto &a_query : cb_state->activeQueries) {
auto active_query_pool_state = GetQueryPoolState(a_query.pool);
if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType && a_query.index == index) {
LogObjectList obj_list(cb_state->commandBuffer);
obj_list.add(query_obj.pool);
obj_list.add(a_query.pool);
skip |= LogError(obj_list, vuids->vuid_dup_query_type,
"%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query "
"%d from pool %s.",
cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), query_obj.index,
report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
report_data->FormatHandle(a_query.pool).c_str());
}
}
}
// There are tighter queue constraints to test for certain query pools
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!cb_state->performance_lock_acquired) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_profile_lock,
"%s: profiling lock must be held before vkBeginCommandBuffer is called on "
"a command buffer where performance queries are recorded.",
cmd_name);
}
if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_not_first,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
"command in the command buffer.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_in_rp,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
if (!enabled_features.core.occlusionQueryPrecise) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
cmd_name);
}
if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
skip |=
LogError(cb_state->commandBuffer, vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
}
}
if (query_obj.query >= query_pool_ci.queryCount) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_query_count,
"%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
}
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
VkFlags flags) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, slot);
struct BeginQueryVuids : ValidateBeginQueryVuids {
BeginQueryVuids() : ValidateBeginQueryVuids() {
vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool";
vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327";
vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803";
vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800";
vuid_query_count = "VUID-vkCmdBeginQuery-query-00802";
vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223";
vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224";
vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225";
vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922";
vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885";
}
};
BeginQueryVuids vuids;
return ValidateBeginQuery(cb_state, query_obj, flags, 0, CMD_BEGINQUERY, "vkCmdBeginQuery()", &vuids);
}
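// Submit-time check that a query was reset before being begun. State recorded in this
// command buffer (localQueryToStateMap) takes precedence; if unknown there, the last known
// global state is consulted. An out-of-range performance-query pass is treated as reset
// here because ValidatePerformanceQuery() reports that error separately.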
bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
bool skip = false;
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
// If reset was in another command buffer, check the global map
if (state == QUERYSTATE_UNKNOWN) {
state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass);
}
    // Performance queries have limitations on when they can be reset.
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
perfPass >= query_pool_state->n_performance_passes) {
// If the pass is invalid, assume RESET state, another error
// will be raised in ValidatePerformanceQuery().
state = QUERYSTATE_RESET;
}
if (state != QUERYSTATE_RESET) {
skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
"%s: %s and query %" PRIu32
": query not reset. "
"After query pool creation, each query must be reset before it is used. "
"Queries must also be reset between uses.",
func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
return skip;
}
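// Submit-time validation specific to VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR pools: counter pass index range, continuously
// held profiling lock, no reset of the same query in this command buffer, and the single-pool restriction that applies
// when performanceCounterMultipleQueryPools is not enabled.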
bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;
const CMD_BUFFER_STATE *cb_state = state_data->GetCBState(commandBuffer);
bool skip = false;
if (perfPass >= query_pool_state->n_performance_passes) {
skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
"Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
query_pool_state->n_performance_passes,
state_data->report_data->FormatHandle(query_obj.pool).c_str());
}
if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
"Commandbuffer %s was submitted and contains a performance query but the"
"profiling lock was not held continuously throughout the recording of commands.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
if (command_buffer_state == QUERYSTATE_RESET) {
skip |= state_data->LogError(
commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
"VkQuery begin command recorded in a command buffer that, either directly or "
"through secondary command buffers, also contains a vkCmdResetQueryPool command "
"affecting the same query.");
}
if (firstPerfQueryPool != VK_NULL_HANDLE) {
if (firstPerfQueryPool != query_obj.pool &&
!state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
skip |= state_data->LogError(
commandBuffer,
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
"Commandbuffer %s contains more than one performance query pool but "
"performanceCounterMultipleQueryPools is not enabled.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
} else {
firstPerfQueryPool = query_obj.pool;
}
return skip;
}
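// Query state is only fully known at queue submit time, so the begin-query checks are captured in a lambda and queued
// on the command buffer for evaluation during vkQueueSubmit validation.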
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
return skip;
});
}
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, slot};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}
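// Defers to submit time the check that a query on a pool with a VK_QUERY_SCOPE_COMMAND_BUFFER_KHR counter ends with
// the last command recorded in the command buffer.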
void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
const CMD_BUFFER_STATE *cb_state = device_data->GetCBState(command_buffer);
const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool);
if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
"vkCmdEndQuery: Query pool %s was created with a counter of scope"
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
"command in the command buffer %s.",
device_data->report_data->FormatHandle(query_obj.pool).c_str(),
device_data->report_data->FormatHandle(command_buffer).c_str());
}
return skip;
});
}
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, uint32_t index, CMD_TYPE cmd,
const char *cmd_name, const ValidateEndQueryVuids *vuids) const {
bool skip = false;
if (!cb_state->activeQueries.count(query_obj)) {
skip |=
            LogError(cb_state->commandBuffer, vuids->vuid_active_queries, "%s: Ending a query before it was started: %s, index %u.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-queryPool-03228",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
skip |= ValidateCmd(cb_state, cmd, cmd_name);
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
bool skip = false;
QueryObject query_obj = {queryPool, slot};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
// Only continue validating if the slot is even within range
if (slot >= available_query_count) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-query-00810",
"vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot,
available_query_count);
} else {
struct EndQueryVuids : ValidateEndQueryVuids {
EndQueryVuids() : ValidateEndQueryVuids() {
vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool";
vuid_active_queries = "VUID-vkCmdEndQuery-None-01923";
vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886";
}
};
EndQueryVuids vuids;
skip |= ValidateCmdEndQuery(cb_state, query_obj, 0, CMD_ENDQUERY, "vkCmdEndQuery()", &vuids);
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, slot};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
const char *first_vuid, const char *sum_vuid) const {
bool skip = false;
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
if (firstQuery >= available_query_count) {
skip |= LogError(queryPool, first_vuid,
"%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name,
report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
}
if ((firstQuery + queryCount) > available_query_count) {
skip |=
LogError(queryPool, sum_vuid,
"%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount, available_query_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdResetQueryPool()",
                                   "VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797");
return skip;
}
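// Maps a query's tracked state plus the requested result flags to the kind of result vkCmdCopyQueryPoolResults can
// produce (some data, no data, a wait hazard, or unknown).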
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
switch (state) {
case QUERYSTATE_UNKNOWN:
return QUERYRESULT_UNKNOWN;
case QUERYSTATE_RESET:
case QUERYSTATE_RUNNING:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
} else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_NO_DATA;
}
case QUERYSTATE_ENDED:
if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_UNKNOWN;
}
case QUERYSTATE_AVAILABLE:
return QUERYRESULT_SOME_DATA;
}
assert(false);
return QUERYRESULT_UNKNOWN;
}
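// Submit-time sweep over the copied query range; any result type other than "some data" or "unknown" indicates a copy
// from a query that cannot provide results.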
bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass,
VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
bool skip = false;
for (uint32_t i = 0; i < queryCount; i++) {
QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass);
QueryResultType result_type = GetQueryResultType(state, flags);
if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) {
skip |= state_data->LogError(
commandBuffer, kVUID_Core_DrawState_InvalidQuery,
"vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
const auto cb_state = GetCBState(commandBuffer);
const auto dst_buff_state = GetBufferState(dstBuffer);
assert(cb_state);
assert(dst_buff_state);
bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
stride, "dstOffset", dstOffset, flags);
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00820",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
if (dstOffset >= dst_buff_state->requirements.size) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819",
"vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s).",
dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer).c_str());
} else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) {
skip |=
LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824",
"vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64
") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).",
dstOffset + (queryCount * stride), dst_buff_state->requirements.size,
report_data->FormatHandle(dst_buff_state->buffer).c_str());
}
auto query_pool_state_iter = queryPoolMap.find(queryPool);
if (query_pool_state_iter != queryPoolMap.end()) {
auto query_pool_state = query_pool_state_iter->second.get();
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232",
"vkCmdCopyQueryPoolResults called with query pool %s but "
"VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies "
"is not set.",
report_data->FormatHandle(queryPool).c_str());
}
}
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827",
"vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not "
"contain VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734",
"vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.",
report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
if (disabled[query_validation]) return;
auto cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags](
const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags,
localQueryToStateMap);
});
}
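// Push constant updates must stay inside the ranges declared by the pipeline layout, and the stageFlags argument must
// match the stage flags of every overlapping range exactly; both directions of that containment are checked below.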
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
const void *pValues) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()");
if (0 == stageFlags) {
skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
"vkCmdPushConstants() call has no stageFlags set.");
}
// Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
// stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
skip |=
LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (%s, offset (%" PRIu32 "), and size (%" PRIu32
"), must contain all stages in overlapping VkPushConstantRange stageFlags (%s), offset (%" PRIu32
"), and size (%" PRIu32 ") in %s.",
string_VkShaderStageFlags(stageFlags).c_str(), offset, size,
string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size,
report_data->FormatHandle(layout).c_str());
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= LogError(
commandBuffer, "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain %s.",
string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size,
string_VkShaderStageFlags(missing_stages).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-queryPool-01416",
"vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.",
report_data->FormatHandle(queryPool).c_str());
}
const uint32_t timestamp_valid_bits =
GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
if (timestamp_valid_bits == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-timestampValidBits-00829",
"vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.",
report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp2KHR()");
Location loc(Func::vkCmdWriteTimestamp2KHR, Field::stage);
if ((stage & (stage - 1)) != 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-stage-03859",
"%s (%s) must only set a single pipeline stage.", loc.Message().c_str(),
string_VkPipelineStageFlags2KHR(stage).c_str());
}
skip |= ValidatePipelineStage(LogObjectList(cb_state->commandBuffer), loc, GetQueueFlags(*cb_state), stage);
loc.field = Field::queryPool;
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-queryPool-03861",
"%s Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", loc.Message().c_str(),
report_data->FormatHandle(queryPool).c_str());
}
    const uint32_t timestamp_valid_bits =
        GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
    if (timestamp_valid_bits == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-timestampValidBits-03863",
"%s Query Pool %s has a timestampValidBits value of zero.", loc.Message().c_str(),
report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap);
});
}
void CoreChecks::PreCallRecordCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap);
});
}
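// Each acceleration structure writes into its own query slot, so the deferred check verifies that queries
// [firstQuery, firstQuery + accelerationStructureCount) were all reset.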
void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool,
uint32_t firstQuery) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()";
cb_state->queryUpdates.emplace_back([accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool](
const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
for (uint32_t i = 0; i < accelerationStructureCount; i++) {
QueryObject query = {{queryPool, firstQuery + i}, perfPass};
skip |= VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
}
return skip;
});
}
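// Checks that every attachment referenced by a subpass was created with the required VkImageUsageFlagBits. For
// imageless framebuffers the usage comes from VkFramebufferAttachmentsCreateInfo rather than the image view's image.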
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
if (ici != nullptr) {
auto creation_usage = ici->usage;
const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici->pNext);
if (stencil_usage_info) {
creation_usage |= stencil_usage_info->stencilUsage;
}
if ((creation_usage & usage_flag) == 0) {
skip |= LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfo *fbaci =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |=
LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
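// Framebuffer creation validation: attachment counts and formats against the render pass, per-attachment dimensions
// and layer counts (including fragment shading rate and fragment density map attachments), usage flags, and device
// limits.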
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) {
if (!enabled_features.core12.imagelessFramebuffer) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (framebuffer_attachments_create_info == nullptr) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but no instance of VkFramebufferAttachmentsCreateInfo is present in the pNext chain.");
} else {
if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 &&
framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |= LogError(
image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that image memory is valid
auto image_data = GetImageState(ivci.image);
skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()",
"UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess");
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (uint32_t k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (ivci.subresourceRange.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-renderPass-04536",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, ivci.subresourceRange.layerCount, highest_view_bit, j);
}
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level "
"%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"width (%u) and the "
"specified shading rate texel width (%u) are smaller than the "
"corresponding framebuffer width (%u).",
i, ivci.subresourceRange.baseMipLevel, j, mip_width,
fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) <
pCreateInfo->height) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u "
"is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, ivci.subresourceRange.baseMipLevel, j, mip_height,
fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (ivci.subresourceRange.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04537",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, ivci.subresourceRange.layerCount, highest_view_bit, j);
}
}
}
}
if (enabled_features.fragment_density_map_features.fragmentDensityMap) {
const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment;
fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext);
if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) {
uint32_t ceiling_width = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->width) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width),
1.0f)));
if (mip_width < ceiling_width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02555",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding the ceiling of framebuffer width / "
"maxFragmentDensityTexelSize.width "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"width: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->height) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height),
1.0f)));
if (mip_height < ceiling_height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02556",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding the ceiling of framebuffer height / "
"maxFragmentDensityTexelSize.height "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"height: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_height, ceiling_height);
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (mip_width < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
"width (%u) smaller than the corresponding framebuffer width (%u).",
i, mip_level, mip_width, pCreateInfo->width);
}
if (mip_height < pCreateInfo->height) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04534",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
"height (%u) smaller than the corresponding framebuffer height (%u).",
i, mip_level, mip_height, pCreateInfo->height);
}
if (ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (ivci.subresourceRange.layerCount != 1 && ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (IsIdentitySwizzle(ivci.components) == false) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
                    if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
const auto image_state = GetImageState(ivci.image);
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if (FormatIsDepthOrStencil(ivci.format)) {
LogObjectList objlist(device);
objlist.add(ivci.image);
skip |= LogError(
objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of "
"%s "
"which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a "
"depth/stencil format %s",
i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(),
string_VkFormat(ivci.format));
}
}
}
if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) {
LogObjectList objlist(device);
objlist.add(image_views[i]);
skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type "
"of VK_IMAGE_VIEW_TYPE_3D",
i);
}
}
}
} else if (framebuffer_attachments_create_info) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i];
bool format_found = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
format_found = true;
}
}
if (!format_found) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (int k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((aii.width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04543",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its width (%u) and the "
"specified shading rate texel width (%u) are smaller than the corresponding framebuffer "
"width (%u).",
i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height,
pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) {
skip |= LogError(
device, kVUIDUndefined,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, aii.layerCount, highest_view_bit, j);
}
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (aii.width < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04541",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04542",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
const char *mismatched_layers_no_multiview_vuid = device_extensions.vk_khr_multiview
? "VUID-VkFramebufferCreateInfo-renderPass-04546"
: "VUID-VkFramebufferCreateInfo-flags-04547";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |= LogError(
device, mismatched_layers_no_multiview_vuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr) {
skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext);
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_attachment_info != nullptr) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo,
VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04549");
}
}
if (device_extensions.vk_khr_multiview) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr &&
depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass];
// Verify input attachments:
skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
                // Verify depth/stencil resolve
if (device_extensions.vk_khr_depth_stencil_resolve) {
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext);
if (ds_resolve) {
skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-02634");
}
}
// Verify fragment shading rate attachments
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext);
if (fragment_shading_rate_attachment_info) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment,
pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04548");
}
}
}
}
bool b_has_non_zero_view_masks = false;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
if (rpci->pSubpasses[i].viewMask != 0) {
b_has_non_zero_view_masks = true;
break;
}
}
if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531",
"vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but "
"renderPass (%s) was specified with non-zero view masks\n",
pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const {
// TODO : Verify that renderPass FB is created with is compatible with FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(pCreateInfo);
return skip;
}
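// Depth-first search through the subpass DAG's incoming edges, looking for a dependency path from 'index' back to
// 'dependent'.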
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
layer_data::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
return true;
}
return false;
}
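// Verifies that every pair of subpasses sharing an attachment is ordered by an explicit or transitive dependency;
// pairs where both uses are read-only layouts are exempt.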
bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout,
const std::vector<SubpassLayout> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
bool b_image_layout_read_only = IsImageLayoutReadOnly(layout);
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
const SubpassLayout &sp = dependent_subpasses[k];
if (subpass == sp.index) continue;
if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no dependency exists, an implicit dependency still might. If not, report an error.
layer_data::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |=
LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
}
}
return result;
}
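// Walk backwards from subpass 'index' to find a subpass that writes 'attachment'; any intermediate subpass
// (depth > 0) between that writer and the original reader must list the attachment in pPreserveAttachments.
// Returns true if the attachment is written by this subpass or one of its predecessors.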
bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index,
const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth,
bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
// If the attachment was written to by a previous node, then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
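// Helpers for detecting aliasing between framebuffer attachments: two subresource ranges alias when both their
// mip-level and array-layer ranges overlap; two bound images alias when their memory ranges overlap.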
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
// Half-open ranges [offset, offset + size) overlap iff each range starts before the other one ends. The previous
// form missed the case where one range fully contains the other.
return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
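// Verify that the render pass declares the subpass dependencies its attachment usage requires:
// 1) find attachments that alias one another (same image view, overlapping subresource ranges, or overlapping memory),
// 2) record, per attachment, which subpasses read it (inputs) and write it (outputs),
// 3) require an explicit or transitive dependency between any two subpasses that conflict on an attachment, and
//    require intermediate subpasses to preserve attachments that are written early and read later.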
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const framebuffer_info = framebuffer->createInfo.ptr();
auto const create_info = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
struct Attachment {
std::vector<SubpassLayout> outputs;
std::vector<SubpassLayout> inputs;
std::vector<uint32_t> overlapping;
};
std::vector<Attachment> attachments(create_info->attachmentCount);
if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
// Find overlapping attachments
for (uint32_t i = 0; i < create_info->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) {
VkImageView viewi = framebuffer_info->pAttachments[i];
VkImageView viewj = framebuffer_info->pAttachments[j];
if (viewi == viewj) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image &&
IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
if (image_data_i->binding.mem_state == image_data_j->binding.mem_state &&
IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
image_data_j->binding.size)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
}
}
}
}
// Find for each attachment the subpasses that use them.
layer_data::unordered_set<uint32_t> attachment_indices;
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
attachment_indices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pInputAttachments[j].layout};
attachments[attachment].inputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].inputs.emplace_back(sp);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pColorAttachments[j].layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
attachment_indices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
if (attachment_indices.count(attachment)) {
skip |=
LogError(renderPass->renderPass, kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
// If there is a dependency needed make sure one exists
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass, i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs,
subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].outputs, subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].inputs, subpass_to_node, skip);
}
}
// Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
// written.
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(renderPass->renderPass, create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0,
skip);
}
}
return skip;
}
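// Validate the structural rules on pDependencies: external and self-dependencies, view-local multiview constraints,
// the ban on later-to-earlier subpass edges, and stage-ordering rules for self-dependencies.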
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2 &dependency = pCreateInfo->pDependencies[i];
auto latest_src_stage = sync_utils::GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
auto earliest_dst_stage = sync_utils::GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
// any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-viewMask-03059",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i,
dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091";
}
}
skip |=
LogError(device, vuid,
"Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= LogError(device, vuid,
"Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
"disallowed to prevent cyclic dependencies.",
i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930";
skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i,
dependency.viewOffset);
} else if (((dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) == 0) &&
pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
skip |= LogError(device, vuid,
"Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
"specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
i, dependency.srcSubpass);
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
(sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
skip |= LogError(
device, vuid,
"Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
i, sync_utils::StringPipelineStageFlags(latest_src_stage).c_str(),
sync_utils::StringPipelineStageFlags(earliest_dst_stage).c_str());
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) &&
(HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) &&
((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243";
skip |= LogError(device, vuid,
"Dependency %u specifies a self-dependency for subpass %u with both stages including a "
"framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.",
i, dependency.srcSubpass);
}
}
}
return skip;
}
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
const char *error_type, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
assert(attachment != VK_ATTACHMENT_UNUSED);
if (attachment >= attachment_count) {
const char *vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name,
error_type, attachment, attachment_count);
}
return skip;
}
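// Bit flags so one attachment can accumulate several uses within a single subpass; any combination of more than one
// bit falls through to "(multiple)" in StringAttachmentType().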
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
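// Record that 'attachment' is used as 'new_use' in 'subpass'. Repeating a use with a different layout is flagged, as
// is any combination other than input + color/depth; resolve and preserve must not be combined with any other use.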
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) const {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()";
if (uses & new_use) {
if (attachment_layouts[attachment] != new_layout) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).",
function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]),
string_VkImageLayout(new_layout));
}
} else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
/* Note: input attachments are assumed to be done first. */
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pPreserveAttachments-03074"
: "VUID-VkSubpassDescription-pPreserveAttachments-00854";
skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass,
attachment, StringAttachmentType(uses), StringAttachmentType(new_use));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
// Handles attachment references regardless of type (input, color, depth, etc)
// Input attachments have extra VUs associated with them
bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference,
const VkFormat attachment_format, bool input, const char *error_type,
const char *function_name) const {
bool skip = false;
// Currently all VUs require attachment to not be UNUSED
assert(reference.attachment != VK_ATTACHMENT_UNUSED);
// currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs
if (rp_version == RENDER_PASS_VERSION_1) {
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857",
"%s: Layout for %s is %s but must not be "
"VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_"
"ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
default:
break;
}
} else {
const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext);
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
skip |=
LogError(device, "VUID-VkAttachmentReference2-layout-03077",
"%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
// Only other layouts in VUs to be checked
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
// First need to make sure feature bit is enabled and the format is actually a depth and/or stencil
if (!enabled_features.core12.separateDepthStencilLayouts) {
skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313",
"%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout));
} else if (!FormatIsDepthOrStencil(attachment_format)) {
// using this over FormatIsColor() in case a multiplane and/or undefined format would sneak in
// "color" format is still an ambiguous term in spec (internal issue #2484)
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04754",
"%s: Layout for %s is %s but the attachment is a not a depth/stencil format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format));
} else {
if ((reference.layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL) ||
(reference.layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL)) {
if (FormatIsDepthOnly(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04756",
"%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
} else {
// DEPTH_ATTACHMENT_OPTIMAL || DEPTH_READ_ONLY_OPTIMAL
if (FormatIsStencilOnly(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04757",
"%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
if (attachment_reference_stencil_layout) {
// This check doesn't rely on the aspect mask value
const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout;
// clang-format off
if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED ||
stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318",
"%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, "
"the stencilLayout (%s) must not be "
"VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.",
function_name, error_type, string_VkImageLayout(stencil_layout));
}
// clang-format on
} else if (FormatIsDepthAndStencil(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04755",
"%s: Layout for %s is %s but the attachment is a depth and stencil format (%s) so if the layout is "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL there needs "
"to be a VkAttachmentReferenceStencilLayout in the pNext chain to set the seperate stencil layout "
"because the separateDepthStencilLayouts feature is enabled.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
}
}
break;
default:
break;
}
}
return skip;
}
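// Walk every attachment reference in every subpass: bounds-check attachment indices, validate layouts and aspect
// masks per reference type, verify potential format features, check sample-count consistency between color and
// depth/stencil attachments, and warn about load ops that read from VK_IMAGE_LAYOUT_UNDEFINED.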
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo,
const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkFormat format = pCreateInfo->pAttachments[i].format;
if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
if ((FormatIsColor(format) || FormatHasDepth(format)) &&
pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
"VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |=
LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
"== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
}
}
// Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
// Track if attachments are used as input as well as another type
layer_data::unordered_set<uint32_t> input_attachments;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-03062"
: "VUID-VkSubpassDescription-pipelineBindPoint-00844";
skip |= LogError(device, vuid, "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS.",
function_name, i);
}
// Check input attachments first
// - so we can detect first-use-as-input for VU #00349
// - if other color or depth/stencil is also input, it limits valid layouts
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
input_attachments.insert(attachment_index);
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]";
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-02801"
: "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
skip |= LogError(
device, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
function_name, j, i);
} else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563"
: "VUID-VkInputAttachmentAspectReference-aspectMask-02250";
skip |= LogError(device, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes "
"VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.",
function_name, j, i);
}
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_index < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, true, error_type.c_str(),
function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT,
attachment_ref.layout);
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963";
skip |= ValidateImageAspectMask(VK_NULL_HANDLE, attachment_format, aspect_mask, function_name, vuid);
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
bool used_as_depth = (subpass.pDepthStencilAttachment != NULL &&
subpass.pDepthStencilAttachment->attachment == attachment_index);
bool used_as_color = false;
for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index);
}
if (!used_as_depth && !used_as_color &&
pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
skip |= LogError(device, vuid,
"%s: attachment %u is first used as an input attachment in %s with loadOp set to "
"VK_ATTACHMENT_LOAD_OP_CLEAR.",
function_name, attachment_index, error_type.c_str());
}
}
attach_first_use[attachment_index] = false;
const VkFormatFeatureFlags valid_flags =
VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & valid_flags) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897"
: "VUID-VkSubpassDescription-pInputAttachments-02647";
skip |=
LogError(device, vuid,
"%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT "
"| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
// These are validated automatically as part of parameter validation for create renderpass 1
// as they are in a struct that only applies to input attachments - not so for v2.
// Check for 0
if (aspect_mask == 0) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800",
"%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str());
} else {
const VkImageAspectFlags valid_bits =
(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);
// Check for valid aspect mask bits
if (aspect_mask & ~valid_bits) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799",
"%s: Input attachment %s aspect mask (0x%" PRIx32 ")is invalid.", function_name,
error_type.c_str(), aspect_mask);
}
}
}
// Validate layout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
break; // valid layouts
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but input attachments must be "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, or "
"VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]";
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
} else {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
}
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount,
error_type.c_str(), function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false,
error_type.c_str(), function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067"
: "VUID-VkSubpassDescription-pResolveAttachments-00849";
skip |= LogError(
device, vuid,
"%s: Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
function_name, i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899"
: "VUID-VkSubpassDescription-pResolveAttachments-02649";
skip |= LogError(device, vuid,
"%s: Resolve attachment %s format (%s) does not contain "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
}
}
}
}
if (subpass.pDepthStencilAttachment) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment";
const uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout;
if (attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format;
skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, attachment_format, false,
error_type.c_str(), function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH,
image_layout);
if (attach_first_use[attachment]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment,
pCreateInfo->pAttachments[attachment]);
}
attach_first_use[attachment] = false;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-02650";
skip |= LogError(device, vuid,
"%s: Depth Stencil %s format (%s) does not contain "
"VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (image_layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
if (input_attachments.find(attachment) != input_attachments.end()) {
skip |= LogError(
device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR "
"or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(image_layout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but depth/stencil attachments must be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_GENERAL, ",
"VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR or"
"VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(image_layout));
break;
}
}
}
uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pColorAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_index < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(),
function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR,
attachment_ref.layout);
VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples;
if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
VkSampleCountFlagBits last_sample_count =
pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
if (current_sample_count != last_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03069"
: "VUID-VkSubpassDescription-pColorAttachments-01417";
skip |= LogError(
device, vuid,
"%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
}
}
last_sample_count_attachment = j;
if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066"
: "VUID-VkSubpassDescription-pResolveAttachments-00848";
skip |= LogError(device, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
function_name, i, attachment_index);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (device_extensions.vk_amd_mixed_attachment_samples) {
if (current_sample_count > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
skip |=
LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.",
function_name, error_type.c_str(), string_VkSampleCountFlagBits(current_sample_count),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
break;
}
}
if (!device_extensions.vk_amd_mixed_attachment_samples &&
!device_extensions.vk_nv_framebuffer_mixed_samples &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
skip |= LogError(device, vuid,
"%s: Subpass %u attempts to render to use a depth/stencil attachment with sample "
"count that differs "
"from color attachment %u."
"The depth attachment ref has sample count %s, whereas color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
string_VkSampleCountFlagBits(current_sample_count));
break;
}
}
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898"
: "VUID-VkSubpassDescription-pColorAttachments-02648";
skip |= LogError(device, vuid,
"%s: Color attachment %s format (%s) does not contain "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
}
attach_first_use[attachment_index] = false;
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
if (input_attachments.find(attachment_index) != input_attachments.end()) {
skip |= LogError(device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but color attachments must be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or "
"VK_IMAGE_LAYOUT_GENERAL.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
if (attachment_index == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065"
: "VUID-VkSubpassDescription-pResolveAttachments-00847";
skip |= LogError(device, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
function_name, i, attachment_index);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_index];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068"
: "VUID-VkSubpassDescription-pResolveAttachments-00850";
skip |= LogError(device, vuid,
"%s: %s resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
function_name, error_type.c_str(), color_desc.format, resolve_desc.format);
}
}
}
}
}
return skip;
}
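// Shared validation for vkCreateRenderPass and vkCreateRenderPass2: attachment usage, the dependency DAG, multiview
// view/correlation masks, and the queue-stage rules of each subpass dependency.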
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name);
skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);
// Validate multiview correlation and view masks
bool view_mask_zero = false;
bool view_mask_non_zero = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
if (subpass.viewMask != 0) {
view_mask_non_zero = true;
} else {
view_mask_zero = true;
}
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
(subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
skip |= LogError(device, vuid,
"%s: The flags parameter of subpass description %u includes "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
function_name, i);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
if (view_mask_non_zero && view_mask_zero) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058",
"%s: Some view masks are non-zero whilst others are zero.", function_name);
}
if (view_mask_zero && pCreateInfo->correlatedViewMaskCount != 0) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057",
"%s: Multiview is not enabled but correlation masks are still provided", function_name);
}
}
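// Correlated view masks must be pairwise disjoint; accumulate the bits seen so far and flag any repeated view.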
uint32_t aggregated_cvms = 0;
for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056"
: "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
skip |=
LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
}
aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
}
LogObjectList objects(device);
auto queue_flags = sync_utils::kAllQueueTypes;
auto func_name = use_rp2 ? Func::vkCreateRenderPass2 : Func::vkCreateRenderPass;
auto structure = use_rp2 ? Struct::VkSubpassDependency2 : Struct::VkSubpassDependency;
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
Location loc(func_name, structure, Field::pDependencies, i);
skip |= ValidateSubpassBarrier(objects, loc, queue_flags, dependency);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
bool skip = false;
// Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
const VkRenderPassMultiviewCreateInfo *multiview_info = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
if (multiview_info) {
if (multiview_info->subpassCount && multiview_info->subpassCount != pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928",
"vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.",
pCreateInfo->subpassCount, multiview_info->subpassCount);
} else if (multiview_info->dependencyCount && multiview_info->dependencyCount != pCreateInfo->dependencyCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929",
"vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.",
pCreateInfo->dependencyCount, multiview_info->dependencyCount);
}
}
const VkRenderPassInputAttachmentAspectCreateInfo *input_attachment_aspect_info =
LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
if (input_attachment_aspect_info) {
for (uint32_t i = 0; i < input_attachment_aspect_info->aspectReferenceCount; ++i) {
uint32_t subpass = input_attachment_aspect_info->pAspectReferences[i].subpass;
uint32_t attachment = input_attachment_aspect_info->pAspectReferences[i].inputAttachmentIndex;
if (subpass >= pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926",
"vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is greater "
"than the subpass "
"count of %u for this render pass.",
subpass, i, pCreateInfo->subpassCount);
} else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927",
"vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is "
"greater than the "
"input attachment count of %u for this subpass.",
attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
}
}
}
const VkRenderPassFragmentDensityMapCreateInfoEXT *fragment_density_map_info =
LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
if (fragment_density_map_info) {
if (fragment_density_map_info->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) {
if (fragment_density_map_info->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
"vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u of "
"for this render pass.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount);
} else {
if (!(fragment_density_map_info->fragmentDensityMapAttachment.layout ==
VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
"vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u' must be equal to "
"VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
if (!(pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_LOAD ||
pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a loadOp "
"equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
if (pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].storeOp !=
VK_ATTACHMENT_STORE_OP_DONT_CARE) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a storeOp "
"equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
}
}
}
if (!skip) {
safe_VkRenderPassCreateInfo2 create_info_2;
ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()");
}
return skip;
}
bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
// If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure,
// then that structure describes depth/stencil resolve operations for the subpass.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext);
if (resolve == nullptr) {
continue;
}
const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_resolve_attachment_index =
(resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);
const bool ds_attachment_not_unused =
(subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_ds_attachment_index =
(ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);
if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE &&
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but both depth and stencil resolve modes are "
"VK_RESOLVE_MODE_NONE.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && valid_ds_attachment_index &&
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (valid_resolve_attachment_index &&
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
VkFormat depth_stencil_attachment_format =
(valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
VkFormat depth_stencil_resolve_attachment_format =
(valid_resolve_attachment_index ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
if (valid_ds_attachment_index && valid_resolve_attachment_index) {
const auto resolve_depth_size = FormatDepthSize(depth_stencil_resolve_attachment_format);
const auto resolve_stencil_size = FormatStencilSize(depth_stencil_resolve_attachment_format);
if (resolve_depth_size > 0 && ((FormatDepthSize(depth_stencil_attachment_format) != resolve_depth_size) ||
(FormatDepthNumericalType(depth_stencil_attachment_format) !=
FormatDepthNumericalType(depth_stencil_resolve_attachment_format)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a depth component (size %u). The depth component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size,
FormatDepthSize(depth_stencil_attachment_format));
}
if (resolve_stencil_size > 0 && ((FormatStencilSize(depth_stencil_attachment_format) != resolve_stencil_size) ||
(FormatStencilNumericalType(depth_stencil_attachment_format) !=
FormatStencilNumericalType(depth_stencil_resolve_attachment_format)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size,
FormatStencilSize(depth_stencil_attachment_format));
}
}
if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid depthResolveMode=%u.",
function_name, i, resolve->depthResolveMode);
}
if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid stencilResolveMode=%u.",
function_name, i, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
"one of them must be %u.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE);
}
}
return skip;
}
bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
const char *function_name) const {
bool skip = false;
if (device_extensions.vk_khr_depth_stencil_resolve) {
skip |= ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name);
}
skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo);
safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo);
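    // Run the render pass create validation shared with vkCreateRenderPass over a deep copy of the create info.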
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name);
return skip;
}
bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
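        // For each attachment description, first collect every subpass that uses it as a fragment shading rate
        // attachment, then flag any other use of the same attachment in the render pass.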
for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount; ++attachment_description) {
std::vector<uint32_t> used_as_fragment_shading_rate_attachment;
            // Prepass to find all uses of this attachment as a fragment shading rate attachment and validate those
            // structures independently
for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext);
if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) {
const VkAttachmentReference2 &attachment_reference =
*(fragment_shading_rate_attachment->pFragmentShadingRateAttachment);
if (attachment_reference.attachment == attachment_description) {
used_as_fragment_shading_rate_attachment.push_back(subpass);
}
if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) &&
(attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521",
"vkCreateRenderPass2: Render pass includes VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM but "
"a fragment shading rate attachment is specified in subpass %u.",
subpass);
}
if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) {
const VkFormatFeatureFlags potential_format_features =
GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format);
if (!(potential_format_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) {
skip |=
LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586",
"vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment "
"shading rate attachment, but specifies format %s, which does not support "
"VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.",
attachment_reference.attachment, subpass,
string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format));
}
if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL &&
attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s.",
subpass, string_VkImageLayout(attachment_reference.layout));
}
if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width)) {
skip |=
LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
"non-power-of-two texel width of %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width);
}
                        if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width <
                            phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
                                "is lower than the advertised minimum width %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width);
                        }
                        if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
                                "is higher than the advertised maximum width %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width);
                        }
if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height)) {
skip |=
LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
"non-power-of-two texel height of %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height);
}
                        if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height <
                            phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is lower than the advertised minimum height %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
                                phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height);
                        }
                        if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is higher than the advertised maximum height %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height);
                        }
                        // Guard the integer divisions: a zero texel width/height is invalid (and flagged by the
                        // power-of-two checks above) but must not crash the layer.
                        const uint32_t texel_width = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width;
                        const uint32_t texel_height = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height;
                        const uint32_t aspect_ratio = (texel_height > 0) ? texel_width / texel_height : 0;
                        const uint32_t inverse_aspect_ratio = (texel_width > 0) ? texel_height / texel_width : 0;
                        if (aspect_ratio >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
                                "which has an aspect ratio %u, which is higher than the advertised maximum aspect ratio %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, aspect_ratio,
                                phys_dev_ext_props.fragment_shading_rate_props
                                    .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                        }
                        if (inverse_aspect_ratio >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
                                "which has an inverse aspect ratio of %u, which is higher than the advertised maximum aspect ratio "
                                "%u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, inverse_aspect_ratio,
                                phys_dev_ext_props.fragment_shading_rate_props
                                    .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                        }
}
}
}
// Lambda function turning a vector of integers into a string
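        // e.g. {0} -> "0", {0, 2} -> "0 and 2", {0, 2, 5} -> "0, 2, and 5"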
        auto vector_to_string = [](const std::vector<uint32_t> &vector) {
            std::stringstream ss;
            size_t size = vector.size();
            for (size_t i = 0; i < size; i++) {
                if (size == 2 && i == 1) {
                    ss << " and ";
                } else if (size > 2 && i == size - 1) {
                    ss << ", and ";
                } else if (i != 0) {
                    ss << ", ";
                }
                ss << vector[i];
            }
            return ss.str();
        };
// Search for other uses of the same attachment
if (!used_as_fragment_shading_rate_attachment.empty()) {
for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass];
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext);
std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment);
for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as color attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
if (subpass_info.pResolveAttachments &&
subpass_info.pResolveAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as color resolve attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) {
if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as input attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
if (subpass_info.pDepthStencilAttachment) {
if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as the depth/stencil attachment in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
}
}
if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) {
if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment ==
attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
}
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()");
}
bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()");
}
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= LogError(pCB->commandBuffer, error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name);
}
return skip;
}
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const {
bool skip = false;
const safe_VkFramebufferCreateInfo *framebuffer_info = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) {
skip |= static_cast<bool>(LogError(
pRenderPassBegin->renderPass, kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, framebuffer_info->width, framebuffer_info->height));
}
return skip;
}
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo,
const char *func_name) const {
bool skip = false;
const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info =
LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext);
if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) {
const safe_VkFramebufferCreateInfo *framebuffer_create_info =
&GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext);
if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207",
"%s: Image views specified at render pass begin, but framebuffer not created with "
"VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT",
func_name);
} else if (framebuffer_attachments_create_info) {
if (framebuffer_attachments_create_info->attachmentImageInfoCount !=
render_pass_attachment_begin_info->attachmentCount) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208",
"%s: %u image views specified at render pass begin, but framebuffer "
"created expecting %u attachments",
func_name, render_pass_attachment_begin_info->attachmentCount,
framebuffer_attachments_create_info->attachmentImageInfoCount);
} else {
const safe_VkRenderPassCreateInfo2 *render_pass_create_info =
&GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) {
const auto image_view_state = GetImageViewState(render_pass_attachment_begin_info->pAttachments[i]);
const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info;
const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info =
&framebuffer_attachments_create_info->pAttachmentImageInfos[i];
const VkImageCreateInfo *image_create_info = &GetImageState(image_view_create_info->image)->createInfo;
if (framebuffer_attachment_image_info->flags != image_create_info->flags) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
"%s: Image view #%u created from an image with flags set as 0x%X, "
"but image info #%u used to create the framebuffer had flags set as 0x%X",
func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags);
}
if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) {
// Give clearer message if this error is due to the "inherited" part or not
if (image_create_info->usage == image_view_state->inherited_usage) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
"%s: Image view #%u created from an image with usage set as 0x%X, "
"but image info #%u used to create the framebuffer had usage set as 0x%X",
func_name, i, image_create_info->usage, i, framebuffer_attachment_image_info->usage);
} else {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
"%s: Image view #%u created from an image with usage set as 0x%X but using "
"VkImageViewUsageCreateInfo the inherited usage is the subset 0x%X "
"and the image info #%u used to create the framebuffer had usage set as 0x%X",
func_name, i, image_create_info->usage, image_view_state->inherited_usage, i,
framebuffer_attachment_image_info->usage);
}
}
uint32_t view_width = image_create_info->extent.width >> image_view_create_info->subresourceRange.baseMipLevel;
if (framebuffer_attachment_image_info->width != view_width) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211",
"%s: Image view #%u created from an image subresource with width set as %u, "
"but image info #%u used to create the framebuffer had width set as %u",
func_name, i, view_width, i, framebuffer_attachment_image_info->width);
}
uint32_t view_height =
image_create_info->extent.height >> image_view_create_info->subresourceRange.baseMipLevel;
if (framebuffer_attachment_image_info->height != view_height) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212",
"%s: Image view #%u created from an image subresource with height set as %u, "
"but image info #%u used to create the framebuffer had height set as %u",
func_name, i, view_height, i, framebuffer_attachment_image_info->height);
}
if (framebuffer_attachment_image_info->layerCount != image_view_create_info->subresourceRange.layerCount) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213",
"%s: Image view #%u created with a subresource range with a layerCount of %u, "
"but image info #%u used to create the framebuffer had layerCount set as %u",
func_name, i, image_view_create_info->subresourceRange.layerCount, i,
framebuffer_attachment_image_info->layerCount);
}
const VkImageFormatListCreateInfo *image_format_list_create_info =
LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext);
if (image_format_list_create_info) {
if (image_format_list_create_info->viewFormatCount != framebuffer_attachment_image_info->viewFormatCount) {
skip |= LogError(
pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214",
"VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
"but image info #%u used to create the framebuffer had viewFormatCount set as %u",
i, image_format_list_create_info->viewFormatCount, i,
framebuffer_attachment_image_info->viewFormatCount);
}
for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) {
bool format_found = false;
for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) {
if (image_format_list_create_info->pViewFormats[j] ==
framebuffer_attachment_image_info->pViewFormats[k]) {
format_found = true;
}
}
if (!format_found) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215",
"VkRenderPassBeginInfo: Image view #%u created with an image including the format "
"%s in its view format list, "
"but image info #%u used to create the framebuffer does not include this format",
i, string_VkFormat(image_format_list_create_info->pViewFormats[j]), i);
}
}
}
if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216",
"%s: Image view #%u created with a format of %s, "
"but render pass attachment description #%u created with a format of %s",
func_name, i, string_VkFormat(image_view_create_info->format), i,
string_VkFormat(render_pass_create_info->pAttachments[i].format));
}
if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217",
"%s: Image view #%u created with an image with %s samples, "
"but render pass attachment description #%u created with %s samples",
func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i,
string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples));
}
if (image_view_create_info->subresourceRange.levelCount != 1) {
skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
"%s: Image view #%u created with multiple (%u) mip levels.", func_name, i,
image_view_create_info->subresourceRange.levelCount);
}
if (IsIdentitySwizzle(image_view_create_info->components) == false) {
skip |= LogError(
render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
"%s: Image view #%u created with non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r),
string_VkComponentSwizzle(image_view_create_info->components.g),
string_VkComponentSwizzle(image_view_create_info->components.b),
string_VkComponentSwizzle(image_view_create_info->components.a));
}
if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) {
skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114",
"%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i);
}
}
}
}
}
return skip;
}
// If the format has a stencil aspect, make sure the stencil[Load|Store]Op flag is checked; if it has a depth or color
// aspect, the [load|store]Op flag must be checked (combined depth/stencil formats check both)
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
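// Example: for VK_FORMAT_S8_UINT (stencil-only) only the stencil op is considered, so the function returns true only
// when stencil_op == op; for a combined depth/stencil format a match on either color_depth_op or stencil_op returns true.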
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
const VkRenderPassBeginInfo *pRenderPassBegin) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
// Handle extension struct from EXT_sample_locations
const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
if (sample_locations_begin_info) {
for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) {
const VkAttachmentSampleLocationsEXT &sample_location =
sample_locations_begin_info->pAttachmentInitialSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
skip |=
LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
"%s: Attachment index %u specified by attachment sample locations %u is greater than the "
"attachment count of %u for the render pass being begun.",
function_name, sample_location.attachmentIndex, i, render_pass_state->createInfo.attachmentCount);
}
}
for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
const VkSubpassSampleLocationsEXT &sample_location = sample_locations_begin_info->pPostSubpassSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
skip |=
LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
"%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
"of %u for the render pass being begun.",
function_name, sample_location.subpassIndex, i, render_pass_state->createInfo.subpassCount);
}
}
}
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto attachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
if (FormatHasDepth(attachment->format)) {
skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil,
function_name);
}
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
skip |= LogError(render_pass_state->renderPass, "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
"must be at least %u entries in pClearValues array to account for the highest index attachment in "
"%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
"attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
"that aren't cleared they will be ignored.",
function_name, pRenderPassBegin->clearValueCount, clear_op_size,
report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
}
skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name);
skip |= VerifyRenderAreaBounds(pRenderPassBegin);
skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
GetFramebufferState(pRenderPassBegin->framebuffer));
if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
}
skip |= ValidateDependencies(framebuffer, render_pass_state);
const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2 : CMD_BEGINRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
}
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
"%s: deviceRenderAreaCount[%" PRIu32 "] is invaild. Physical device count is %" PRIu32 ".",
function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
}
}
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdNextSubpass2()" : "vkCmdNextSubpass()";
const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2 : CMD_NEXTSUBPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount;
if (cb_state->activeSubpass == subpass_count - 1) {
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass,
Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer));
}
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get();
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name);
}
}
const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2 : CMD_ENDRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionFinalSubpassLayouts(cb_state, cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get());
}
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
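    // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.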
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
const CMD_BUFFER_STATE *pSubCB, const char *caller) const {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer ? pCB->activeFramebuffer->framebuffer : VK_NULL_HANDLE;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
objlist.add(primary_fb);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
" that is not the same as the primary command buffer's current active %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
report_data->FormatHandle(primary_fb).c_str());
}
auto fb = GetFramebufferState(secondary_fb);
if (!fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
return skip;
}
}
return skip;
}
bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const {
bool skip = false;
layer_data::unordered_set<int> active_types;
if (!disabled[query_validation]) {
for (const auto &query_object : pCB->activeQueries) {
auto query_pool_state = GetQueryPoolState(query_object.pool);
if (query_pool_state) {
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(query_object.pool);
                        skip |= LogError(
                            objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104",
                            "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
                            ". Pipeline statistics are being queried, so every pipelineStatistics bit inherited by the "
                            "secondary command buffer must also be enabled on the query pool.",
                            report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            report_data->FormatHandle(query_object.pool).c_str());
}
}
active_types.insert(query_pool_state->createInfo.queryType);
}
}
for (const auto &query_object : pSubCB->startedQueries) {
auto query_pool_state = GetQueryPoolState(query_object.pool);
if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(query_object.pool);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
" of type %d but a query of that type has been started on secondary %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType,
report_data->FormatHandle(pSubCB->commandBuffer).c_str());
}
}
}
auto primary_pool = pCB->command_pool.get();
auto secondary_pool = pSubCB->command_pool.get();
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
LogObjectList objlist(pSubCB->commandBuffer);
objlist.add(pCB->commandBuffer);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094",
"vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
"%s created in queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
}
return skip;
}
// Object that simulates the inherited viewport/scissor state as the device executes the called secondary command buffers.
// Visit the calling primary command buffer first, then the called secondaries in order.
// Contact David Zhao Akeley <dakeley@nvidia.com> for clarifications and bug fixes.
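// Usage: call VisitPrimary() once with the calling primary command buffer, then VisitSecondary() once for each element
// of pCommandBuffers in order (see PreCallValidateCmdExecuteCommands below).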
class CoreChecks::ViewportScissorInheritanceTracker {
static_assert(4 == sizeof(CMD_BUFFER_STATE::viewportMask), "Adjust max_viewports to match viewportMask bit width");
static constexpr uint32_t kMaxViewports = 32, kNotTrashed = uint32_t(-2), kTrashedByPrimary = uint32_t(-1);
const ValidationObject &validation_;
const CMD_BUFFER_STATE *primary_state_ = nullptr;
uint32_t viewport_mask_;
uint32_t scissor_mask_;
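    // For each viewport/scissor index, records kNotTrashed, kTrashedByPrimary, or the pCommandBuffers index of the
    // secondary command buffer that last left the state undefined.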
uint32_t viewport_trashed_by_[kMaxViewports]; // filled in VisitPrimary.
uint32_t scissor_trashed_by_[kMaxViewports];
VkViewport viewports_to_inherit_[kMaxViewports];
uint32_t viewport_count_to_inherit_; // 0 if viewport count (EXT state) has never been defined (but not trashed)
uint32_t scissor_count_to_inherit_; // 0 if scissor count (EXT state) has never been defined (but not trashed)
uint32_t viewport_count_trashed_by_;
uint32_t scissor_count_trashed_by_;
public:
ViewportScissorInheritanceTracker(const ValidationObject &validation) : validation_(validation) {}
bool VisitPrimary(const CMD_BUFFER_STATE *primary_state) {
assert(!primary_state_);
primary_state_ = primary_state;
viewport_mask_ = primary_state->viewportMask | primary_state->viewportWithCountMask;
scissor_mask_ = primary_state->scissorMask | primary_state->scissorWithCountMask;
for (uint32_t n = 0; n < kMaxViewports; ++n) {
uint32_t bit = uint32_t(1) << n;
viewport_trashed_by_[n] = primary_state->trashedViewportMask & bit ? kTrashedByPrimary : kNotTrashed;
scissor_trashed_by_[n] = primary_state->trashedScissorMask & bit ? kTrashedByPrimary : kNotTrashed;
if (viewport_mask_ & bit) {
viewports_to_inherit_[n] = primary_state->dynamicViewports[n];
}
}
viewport_count_to_inherit_ = primary_state->viewportWithCountCount;
scissor_count_to_inherit_ = primary_state->scissorWithCountCount;
viewport_count_trashed_by_ = primary_state->trashedViewportCount ? kTrashedByPrimary : kNotTrashed;
scissor_count_trashed_by_ = primary_state->trashedScissorCount ? kTrashedByPrimary : kNotTrashed;
return false;
}
bool VisitSecondary(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
bool skip = false;
if (secondary_state->inheritedViewportDepths.empty()) {
skip |= VisitSecondaryNoInheritance(cmd_buffer_idx, secondary_state);
} else {
skip |= VisitSecondaryInheritance(cmd_buffer_idx, secondary_state);
}
// See note at end of VisitSecondaryNoInheritance.
if (secondary_state->trashedViewportCount) {
viewport_count_trashed_by_ = cmd_buffer_idx;
}
if (secondary_state->trashedScissorCount) {
scissor_count_trashed_by_ = cmd_buffer_idx;
}
return skip;
}
private:
// Track state inheritance as specified by VK_NV_inherited_scissor_viewport, including states
// overwritten to undefined value by bound pipelines with non-dynamic state.
bool VisitSecondaryNoInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
viewport_mask_ |= secondary_state->viewportMask | secondary_state->viewportWithCountMask;
scissor_mask_ |= secondary_state->scissorMask | secondary_state->scissorWithCountMask;
        for (uint32_t n = 0; n < kMaxViewports; ++n) {
            uint32_t bit = uint32_t(1) << n;
            if ((secondary_state->viewportMask | secondary_state->viewportWithCountMask) & bit) {
                viewports_to_inherit_[n] = secondary_state->dynamicViewports[n];
                viewport_trashed_by_[n] = kNotTrashed;
            }
            if ((secondary_state->scissorMask | secondary_state->scissorWithCountMask) & bit) {
                scissor_trashed_by_[n] = kNotTrashed;
            }
            // Order of above vs below matters here.
            if (secondary_state->trashedViewportMask & bit) {
                viewport_trashed_by_[n] = cmd_buffer_idx;
            }
            if (secondary_state->trashedScissorMask & bit) {
                scissor_trashed_by_[n] = cmd_buffer_idx;
            }
        }
        // The with-count state does not depend on the per-viewport bit, so update it once, outside the loop.
        if (secondary_state->viewportWithCountCount != 0) {
            viewport_count_to_inherit_ = secondary_state->viewportWithCountCount;
            viewport_count_trashed_by_ = kNotTrashed;
        }
        if (secondary_state->scissorWithCountCount != 0) {
            scissor_count_to_inherit_ = secondary_state->scissorWithCountCount;
            scissor_count_trashed_by_ = kNotTrashed;
        }
        // Check trashing dynamic viewport/scissor count in VisitSecondary (at end) as even secondary command buffers enabling
        // viewport/scissor state inheritance may define this state statically in bound graphics pipelines.
        return false;
}
// Validate needed inherited state as specified by VK_NV_inherited_scissor_viewport.
bool VisitSecondaryInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
bool skip = false;
uint32_t check_viewport_count = 0, check_scissor_count = 0;
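        // Number of viewports/scissors whose inherited state the secondary's draw commands may consume and which must
        // therefore be validated below.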
// Common code for reporting missing inherited state (for a myriad of reasons).
auto check_missing_inherit = [&](uint32_t was_ever_defined, uint32_t trashed_by, VkDynamicState state, uint32_t index = 0,
uint32_t static_use_count = 0, const VkViewport *inherited_viewport = nullptr,
const VkViewport *expected_viewport_depth = nullptr) {
if (was_ever_defined && trashed_by == kNotTrashed) {
if (state != VK_DYNAMIC_STATE_VIEWPORT) return false;
assert(inherited_viewport != nullptr && expected_viewport_depth != nullptr);
if (inherited_viewport->minDepth != expected_viewport_depth->minDepth ||
inherited_viewport->maxDepth != expected_viewport_depth->maxDepth) {
return validation_.LogError(
primary_state_->commandBuffer, "VUID-vkCmdDraw-commandBuffer-02701",
"vkCmdExecuteCommands(): Draw commands in pCommandBuffers[%u] (%s) consume inherited viewport %u %s"
"but this state was not inherited as its depth range [%f, %f] does not match "
"pViewportDepths[%u] = [%f, %f]",
unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer).c_str(),
unsigned(index), index >= static_use_count ? "(with count) " : "", inherited_viewport->minDepth,
inherited_viewport->maxDepth, unsigned(cmd_buffer_idx), expected_viewport_depth->minDepth,
expected_viewport_depth->maxDepth);
// akeley98 note: This VUID is not ideal; however, there isn't a more relevant VUID as
// it isn't illegal in itself to have mismatched inherited viewport depths.
// The error only occurs upon attempting to consume the viewport.
} else {
return false;
}
}
const char *state_name;
bool format_index = false;
switch (state) {
case VK_DYNAMIC_STATE_SCISSOR:
state_name = "scissor";
format_index = true;
break;
case VK_DYNAMIC_STATE_VIEWPORT:
state_name = "viewport";
format_index = true;
break;
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
state_name = "dynamic viewport count";
break;
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
state_name = "dynamic scissor count";
break;
default:
assert(0);
state_name = "<unknown state, report bug>";
break;
}
std::stringstream ss;
ss << "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[" << cmd_buffer_idx << "] ("
<< validation_.report_data->FormatHandle(secondary_state->commandBuffer).c_str() << ") consume inherited "
<< state_name << " ";
if (format_index) {
if (index >= static_use_count) {
ss << "(with count) ";
}
ss << index << " ";
}
ss << "but this state ";
if (!was_ever_defined) {
ss << "was never defined.";
} else if (trashed_by == kTrashedByPrimary) {
ss << "was left undefined after vkCmdExecuteCommands or vkCmdBindPipeline (with non-dynamic state) in "
"the calling primary command buffer.";
} else {
ss << "was left undefined after vkCmdBindPipeline (with non-dynamic state) in pCommandBuffers[" << trashed_by
<< "].";
}
return validation_.LogError(primary_state_->commandBuffer, "VUID-vkCmdDraw-commandBuffer-02701", ss.str().c_str());
};
// Check if secondary command buffer uses viewport/scissor-with-count state, and validate this state if so.
if (secondary_state->usedDynamicViewportCount) {
if (viewport_count_to_inherit_ == 0 || viewport_count_trashed_by_ != kNotTrashed) {
skip |= check_missing_inherit(viewport_count_to_inherit_, viewport_count_trashed_by_,
VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
} else {
check_viewport_count = viewport_count_to_inherit_;
}
}
if (secondary_state->usedDynamicScissorCount) {
if (scissor_count_to_inherit_ == 0 || scissor_count_trashed_by_ != kNotTrashed) {
skip |= check_missing_inherit(scissor_count_to_inherit_, scissor_count_trashed_by_,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
} else {
check_scissor_count = scissor_count_to_inherit_;
}
}
        // Check the maximum of (viewports used by pipelines with static viewport count, viewports used by pipelines with
        // dynamic viewport count), but limit to the length of the inheritedViewportDepths array and to the uint32_t bit
        // width (validation layer limit).
check_viewport_count = std::min(std::min(kMaxViewports, uint32_t(secondary_state->inheritedViewportDepths.size())),
std::max(check_viewport_count, secondary_state->usedViewportScissorCount));
check_scissor_count = std::min(kMaxViewports, std::max(check_scissor_count, secondary_state->usedViewportScissorCount));
if (secondary_state->usedDynamicViewportCount &&
viewport_count_to_inherit_ > secondary_state->inheritedViewportDepths.size()) {
skip |= validation_.LogError(
primary_state_->commandBuffer, "VUID-vkCmdDraw-commandBuffer-02701",
"vkCmdExecuteCommands(): "
"Draw commands in pCommandBuffers[%u] (%s) consume inherited dynamic viewport with count state "
"but the dynamic viewport count (%u) exceeds the inheritance limit (viewportDepthCount=%u).",
unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer).c_str(),
unsigned(viewport_count_to_inherit_), unsigned(secondary_state->inheritedViewportDepths.size()));
}
for (uint32_t n = 0; n < check_viewport_count; ++n) {
skip |= check_missing_inherit(viewport_mask_ & uint32_t(1) << n, viewport_trashed_by_[n], VK_DYNAMIC_STATE_VIEWPORT, n,
secondary_state->usedViewportScissorCount, &viewports_to_inherit_[n],
&secondary_state->inheritedViewportDepths[n]);
}
for (uint32_t n = 0; n < check_scissor_count; ++n) {
skip |= check_missing_inherit(scissor_mask_ & uint32_t(1) << n, scissor_trashed_by_[n], VK_DYNAMIC_STATE_SCISSOR, n,
secondary_state->usedViewportScissorCount);
}
return skip;
}
};
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kMaxViewports;
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kNotTrashed;
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kTrashedByPrimary;
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const CMD_BUFFER_STATE *sub_cb_state = NULL;
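    // Secondaries already seen in this pCommandBuffers array; duplicates require
    // VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT (VUID 00093).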
layer_data::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers;
ViewportScissorInheritanceTracker viewport_scissor_inheritance{*this};
    if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
skip |= viewport_scissor_inheritance.VisitPrimary(cb_state);
}
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
        if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
skip |= viewport_scissor_inheritance.VisitSecondary(i, sub_cb_state);
}
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
"cmd buffers in pCommandBuffers array must be secondary.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
} else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
if (cb_state->activeRenderPass &&
!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->activeRenderPass->renderPass);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary %s is executed within a %s "
"instance scope, but the Secondary Command Buffer does not have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
} else if (!cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
"vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
"instance scope, but the Secondary Command Buffer does have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
} else if (cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// Make sure render pass is compatible with parent command buffer pass if has continue
if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
skip |= ValidateRenderPassCompatibility(
"primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer",
secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
// Inherit primary's activeFramebuffer while running the validate functions
for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
skip |= function(cb_state, cb_state->activeFramebuffer.get());
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (sub_cb_state->in_use.load()) {
skip |= LogError(
cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00091",
"vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
// We use a const_cast because one cannot query a container keyed on a non-const pointer using a const pointer
if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092",
"vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
"set if previously executed in %s",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
const auto insert_pair = linked_command_buffers.insert(sub_cb_state);
if (!insert_pair.second) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00093",
"vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->commandBuffer);
skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary %s does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"%s to be treated as if it does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
}
if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
"inherited queries not supported on this device.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
}
// Validate initial layout uses vs. the primary cmd buffer state
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// initial layout usage of secondary command buffers resources must match parent command buffer
const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
// Const getter can be null in which case we have nothing to check against for this image...
if (!cb_subres_map) continue;
const auto *sub_cb_subres_map = &sub_layout_map_entry.second;
// Validate the initial_uses, that they match the current state of the primary cb, or, absent a current state,
// that they match any initial_layout.
for (const auto &subres_layout : *sub_cb_subres_map) {
const auto &sub_layout = subres_layout.initial_layout;
const auto &subresource = subres_layout.subresource;
if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial
// Look up the layout to compare to the initial layout of the sub command buffer (current else initial)
const auto *cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource);
auto cb_layout = cb_layouts ? cb_layouts->current_layout : kInvalidLayout;
const char *layout_type = "current";
if (cb_layout == kInvalidLayout) {
cb_layout = cb_layouts ? cb_layouts->initial_layout : kInvalidLayout;
layout_type = "initial";
}
if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
"mip level %u) which expects layout %s--instead, image %s layout is %s.",
"vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
string_VkImageLayout(cb_layout));
}
}
}
// All command buffers involved must be uniformly protected or unprotected
if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820",
"vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is a unprotected",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
} else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821",
"vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is a protected",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
}
skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
return skip;
}
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
VkFlags flags, void **ppData) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
if (mem_info->multi_instance) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00683",
"Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask "
"with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.",
report_data->FormatHandle(mem).c_str());
}
skip |= ValidateMapMemRange(mem_info, offset, size);
}
return skip;
}
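// A minimal usage sketch (not part of the layer) of the map/unmap contract validated here and in
// PreCallValidateUnmapMemory below; "memory" is a hypothetical allocation from a HOST_VISIBLE memory type.
//
//   void *data = nullptr;
//   vkMapMemory(device, memory, /*offset*/ 0, VK_WHOLE_SIZE, /*flags*/ 0, &data);
//   memcpy(data, src, size);        // host writes through the mapping
//   vkUnmapMemory(device, memory);  // memory must currently be mapped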
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info && !mem_info->mapped_range.size) {
// Valid Usage: memory must currently be mapped
skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
report_data->FormatHandle(mem).c_str());
}
return skip;
}
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetDevMemState(pMemRanges[i].memory);
if (mem_info) {
// Makes sure the memory is already mapped
if (mem_info->mapped_range.size == 0) {
skip = LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684",
"%s: Attempting to use memory (%s) that is not currently host mapped.", funcName,
report_data->FormatHandle(pMemRanges[i].memory).c_str());
}
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mapped_range.offset > pMemRanges[i].offset) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset),
static_cast<size_t>(mem_info->mapped_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mapped_range.offset + mem_info->mapped_range.size);
if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) const {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
const VkDeviceSize offset = mem_ranges[i].offset;
const VkDeviceSize size = mem_ranges[i].size;
if (SafeModulo(offset, atom_size) != 0) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, offset, atom_size);
}
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
const auto allocation_size = mem_info->alloc_info.allocationSize;
if (size == VK_WHOLE_SIZE) {
const auto mapping_offset = mem_info->mapped_range.offset;
const auto mapping_size = mem_info->mapped_range.size;
const auto mapping_end = ((mapping_size == VK_WHOLE_SIZE) ? allocation_size : mapping_offset + mapping_size);
if (SafeModulo(mapping_end, atom_size) != 0 && mapping_end != allocation_size) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01389",
"%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and the mapping end (0x%" PRIxLEAST64
" = 0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64
") not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
") and not equal to the end of the memory object (0x%" PRIxLEAST64 ").",
func_name, i, mapping_end, mapping_offset, mapping_size, atom_size, allocation_size);
}
} else {
const auto range_end = size + offset;
if (range_end != allocation_size && SafeModulo(size, atom_size) != 0) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
") and offset + size (0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64 " = 0x%" PRIxLEAST64
") not equal to the memory size (0x%" PRIxLEAST64 ").",
func_name, i, size, atom_size, offset, size, range_end, allocation_size);
}
}
}
}
return skip;
}
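// A minimal sketch (not part of the layer) of a flush that satisfies the nonCoherentAtomSize rules
// checked above; "atom" stands in for VkPhysicalDeviceLimits::nonCoherentAtomSize and "memory" is a
// hypothetical non-coherent HOST_VISIBLE allocation.
//
//   VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//   range.memory = memory;
//   range.offset = 0;                                // must be a multiple of atom
//   range.size = ((size + atom - 1) / atom) * atom;  // round up to an atom multiple (or use VK_WHOLE_SIZE)
//   vkFlushMappedMemoryRanges(device, 1, &range);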
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690",
"vkGetDeviceMemoryCommitment(): Querying commitment for memory without "
"VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos,
const char *api_name) const {
bool skip = false;
bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0;
char error_prefix[128];
strcpy(error_prefix, api_name);
// Track all image sub resources if they are bound for bind_image_mem_2
// uint32_t[3] is which index in pBindInfos for max 3 planes
// Non disjoint images act as a single plane
layer_data::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound;
for (uint32_t i = 0; i < bindInfoCount; i++) {
if (bind_image_mem_2 == true) {
sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i);
}
const VkBindImageMemoryInfo &bind_info = pBindInfos[i];
const IMAGE_STATE *image_state = GetImageState(bind_info.image);
if (image_state) {
// Track objects tied to memory
skip |=
ValidateSetMemBinding(bind_info.memory, VulkanTypedHandle(bind_info.image, kVulkanObjectTypeImage), error_prefix);
const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext);
const auto mem_info = GetDevMemState(bind_info.memory);
// Need an extra check for the disjoint flag in case this was called without bindImageMemory2, to avoid false positive errors
// no 'else' case as if that happens another VUID is already being triggered for it being invalid
if ((plane_info == nullptr) && (image_state->disjoint == false)) {
// Check non-disjoint images VkMemoryRequirements
// All validation using image_state->requirements for external AHB is checked in the Android-only section
if (image_state->external_ahb == false) {
const VkMemoryRequirements mem_req = image_state->requirements;
// Validate memory requirements alignment
if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memoryOffset-01048";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613";
}
skip |=
LogError(bind_info.image, validation_error,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, bind_info.memoryOffset, mem_req.alignment);
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-size-01049";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01614";
}
skip |= LogError(bind_info.image, validation_error,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size);
}
// Validate memory type used
{
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01047";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01612";
}
skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error);
}
}
}
if (bind_image_mem_2 == true) {
// Since it's a non-disjoint image, finding the VkImage already in the map means a duplicate bind
auto it = resources_bound.find(image_state->image);
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX};
resources_bound.emplace(image_state->image, bound_index);
} else {
skip |= LogError(
bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]",
error_prefix, it->second[0], i);
}
}
} else if ((plane_info != nullptr) && (image_state->disjoint == true)) {
// Check disjoint images VkMemoryRequirements for given plane
int plane = 0;
// All validation using image_state->plane*_requirements for external AHB is checked in the Android-only section
if (image_state->external_ahb == false) {
VkMemoryRequirements disjoint_mem_req = {};
const VkImageAspectFlagBits aspect = plane_info->planeAspect;
switch (aspect) {
case VK_IMAGE_ASPECT_PLANE_0_BIT:
plane = 0;
disjoint_mem_req = image_state->plane0_requirements;
break;
case VK_IMAGE_ASPECT_PLANE_1_BIT:
plane = 1;
disjoint_mem_req = image_state->plane1_requirements;
break;
case VK_IMAGE_ASPECT_PLANE_2_BIT:
plane = 2;
disjoint_mem_req = image_state->plane2_requirements;
break;
default:
assert(false); // parameter validation should have caught this
break;
}
// Validate memory requirements alignment
if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.",
error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect));
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.",
error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size,
string_VkImageAspectFlagBits(aspect));
}
// Validate memory type used
{
skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix,
"VUID-VkBindImageMemoryInfo-pNext-01619");
}
}
}
auto it = resources_bound.find(image_state->image);
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX};
bound_index[plane] = i;
resources_bound.emplace(image_state->image, bound_index);
} else {
if (it->second[plane] == UINT32_MAX) {
it->second[plane] = i;
} else {
skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same disjoint image sub-resource for plane %d is being bound twice at "
"pBindInfos[%d] and pBindInfos[%d]",
error_prefix, plane, it->second[plane], i);
}
}
}
if (mem_info) {
// Validate bound memory range information
// If memory is exported to an AHB, then mem_info->allocationSize must be zero and this check is not needed
if ((mem_info->is_export == false) || ((mem_info->export_handle_type_flags &
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0)) {
skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info, bind_info.memoryOffset, error_prefix);
}
// Validate dedicated allocation
if (mem_info->is_dedicated) {
if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
const auto orig_image_state = GetImageState(mem_info->dedicated_image);
const auto current_image_state = GetImageState(bind_info.image);
if ((bind_info.memoryOffset != 0) || !orig_image_state || !current_image_state ||
!current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
orig_image_state->createInfo)) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-02629";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-02629";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
objlist.add(mem_info->dedicated_image);
skip |= LogError(
objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must compatible "
"with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
}
} else {
if ((mem_info->dedicated_image != VK_NULL_HANDLE) &&
((bind_info.memoryOffset != 0) || (mem_info->dedicated_image != bind_info.image))) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01509";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
objlist.add(mem_info->dedicated_image);
skip |=
LogError(objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
}
}
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least "
"one handle from VkImage (%s) handleType %s.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(bind_info.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->is_import_ahb == true) {
skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory,
bind_info.image);
} else if (mem_info->is_import == true) {
if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02989";
} else if ((!bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02989";
} else if ((bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02729";
} else if ((!bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02729";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s "
"which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(bind_info.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
}
// Validate mix of protected buffer and memory
if ((image_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was "
"set to use protected memory.",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(bind_info.image).c_str());
} else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not "
"set to use protected memory.",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(bind_info.image).c_str());
}
}
const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext);
if (swapchain_info) {
if (bind_info.memory != VK_NULL_HANDLE) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str());
}
if (image_state->create_from_swapchain != swapchain_info->swapchain) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
objlist.add(swapchain_info->swapchain);
skip |= LogError(
objlist, kVUID_Core_BindImageMemory_Swapchain,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(swapchain_info->swapchain).c_str());
}
const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
"%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix,
swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(),
static_cast<int>(swapchain_state->images.size()));
}
} else {
if (image_state->create_from_swapchain) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630",
"%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.",
error_prefix);
}
if (!mem_info) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix,
report_data->FormatHandle(bind_info.memory).c_str());
}
}
if (plane_info) {
// Checks for disjoint bit in image
if (image_state->disjoint == false) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618",
"%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with "
"VK_IMAGE_CREATE_DISJOINT_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str());
}
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_state->createInfo.format);
VkImageAspectFlags aspect = plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
}
}
}
// Check to make sure all disjoint planes were bound
for (auto &resource : resources_bound) {
const IMAGE_STATE *image_state = GetImageState(resource.first);
if (image_state->disjoint == true) {
uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format);
for (uint32_t i = 0; i < total_planes; i++) {
if (resource.second[i] == UINT32_MAX) {
skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858",
"%s: Plane %u of the disjoint image was not bound. All %d planes need to bound individually "
"in separate pBindInfos in a single call.",
api_name, i, total_planes);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |=
LogError(image, "VUID-vkBindImageMemory-image-01608",
"%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).",
report_data->FormatHandle(image).c_str());
}
}
auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>();
bind_info.image = image;
bind_info.memory = mem;
bind_info.memoryOffset = memoryOffset;
skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()");
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()");
}
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()");
}
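// A minimal sketch (not part of the layer) of binding a disjoint multi-planar image, the case
// ValidateBindImageMemory tracks via resources_bound; all handles below are hypothetical.
//
//   VkBindImagePlaneMemoryInfo plane0 = {VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO};
//   plane0.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
//   VkBindImagePlaneMemoryInfo plane1 = plane0;
//   plane1.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
//   VkBindImageMemoryInfo binds[2] = {};
//   binds[0].sType = binds[1].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
//   binds[0].pNext = &plane0; binds[0].image = image; binds[0].memory = mem_plane0;
//   binds[1].pNext = &plane1; binds[1].image = image; binds[1].memory = mem_plane1;
//   vkBindImageMemory2(device, 2, binds);  // every plane must be bound in a single call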
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->write_in_use) {
skip |=
LogError(event, kVUID_Core_DrawState_QueueForwardProgress,
"vkSetEvent(): %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str());
}
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkSetEvent-event-03941",
"vkSetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkResetEvent-event-03823",
"vkResetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetEventStatus(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkGetEventStatus-event-03940",
"vkGetEventStatus(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
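// A minimal sketch (not part of the layer) of host-side event use that passes the checks above.
// Events created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR must instead be set/reset on the device
// (e.g. vkCmdSetEvent2); "event" here is a hypothetical event created without that flag.
//
//   vkSetEvent(device, event);                           // legal only for non-device-only events
//   VkResult status = vkGetEventStatus(device, event);   // VK_EVENT_SET
//   vkResetEvent(device, event);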
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) const {
const auto queue_data = GetQueueState(queue);
const auto fence_state = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113",
"VkQueueBindSparse()");
if (skip) {
return true;
}
const auto queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) {
skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype",
"vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.");
}
layer_data::unordered_set<VkSemaphore> signaled_semaphores;
layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
layer_data::unordered_set<VkSemaphore> internal_semaphores;
auto *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245"
: kVUID_Core_DrawState_QueueForwardProgress;
for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
const VkBindSparseInfo &bind_info = pBindInfo[bind_idx];
auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info.pNext);
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bind_info.pWaitSemaphores[i];
const auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].waitSemaphoreCount (%u)",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
(semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) && !SemaphoreWasSignaled(semaphore))) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(
objlist,
semaphore_state->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be "
"signaled.",
report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
semaphore_state->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bind_info.pSignalSemaphores[i];
const auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
"VkQueueBindSparse: signal value (0x%" PRIx64
") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
") in pBindInfo[%u].pSignalSemaphores[%u]",
semaphore_state->payload, report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->pSignalSemaphoreValues[i], bind_idx, i);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |=
LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].signalSemaphoreCount (%u)",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx, bind_info.signalSemaphoreCount);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
semaphore_state->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) ||
(!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) {
LogObjectList objlist(semaphore);
objlist.add(queue);
objlist.add(semaphore_state->signaler.first);
skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
"previously signaled by %s but has not since been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), bind_idx, i,
report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(semaphore_state->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) {
const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx];
const auto image_state = GetImageState(image_bind.image);
if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
"vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
bind_idx, image_idx);
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
Location outer_loc(Func::vkQueueBindSparse, Struct::VkBindSparseInfo);
const VkBindSparseInfo *bind_info = &pBindInfo[bind_idx];
auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info->pNext);
if (info) {
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->waitSemaphoreValueCount) {
for (uint32_t i = 0; i < bind_info->waitSemaphoreCount; ++i) {
auto loc = outer_loc.dot(Field::pWaitSemaphoreValues, i);
VkSemaphore semaphore = bind_info->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pWaitSemaphoreValues[i]);
}
}
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->signalSemaphoreValueCount) {
for (uint32_t i = 0; i < bind_info->signalSemaphoreCount; ++i) {
auto loc = outer_loc.dot(Field::pSignalSemaphoreValues, i);
VkSemaphore semaphore = bind_info->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pSignalSemaphoreValues[i]);
}
}
}
}
return skip;
}
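// A minimal sketch (not part of the layer) of the timeline-semaphore chaining this function expects;
// the counts in VkTimelineSemaphoreSubmitInfo must match the VkBindSparseInfo semaphore counts, and
// "timeline_sem" / "sparse_queue" are hypothetical handles.
//
//   uint64_t wait_value = 1, signal_value = 2;  // signal_value must exceed the current payload
//   VkTimelineSemaphoreSubmitInfo timeline = {VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO};
//   timeline.waitSemaphoreValueCount = 1;   timeline.pWaitSemaphoreValues = &wait_value;
//   timeline.signalSemaphoreValueCount = 1; timeline.pSignalSemaphoreValues = &signal_value;
//   VkBindSparseInfo bind = {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO};
//   bind.pNext = &timeline;
//   bind.waitSemaphoreCount = 1;   bind.pWaitSemaphores = &timeline_sem;
//   bind.signalSemaphoreCount = 1; bind.pSignalSemaphores = &timeline_sem;
//   vkQueueBindSparse(sparse_queue, 1, &bind, VK_NULL_HANDLE);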
bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const {
bool skip = false;
const auto semaphore_state = GetSemaphoreState(pSignalInfo->semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257",
"%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", api_name,
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
return skip;
}
if (semaphore_state && semaphore_state->payload >= pSignalInfo->value) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258",
"%s(): value must be greater than current semaphore %s value", api_name,
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
for (auto &pair : queueMap) {
const QUEUE_STATE &queue_state = pair.second;
for (const auto &submission : queue_state.submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signal_semaphore.payload) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259",
"%s(): value must be greater than value of pending signal operation "
"for semaphore %s",
api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
}
}
}
if (!skip) {
Location loc(Func::vkSignalSemaphore, Struct::VkSemaphoreSignalInfo, Field::value);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, pSignalInfo->semaphore, pSignalInfo->value);
}
return skip;
}
bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore");
}
bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR");
}
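// A minimal sketch (not part of the layer) of a host signal that satisfies ValidateSignalSemaphore:
// the new value must be strictly greater than the semaphore's current counter and any pending signal.
// "timeline_sem" and "current_value" are hypothetical.
//
//   VkSemaphoreSignalInfo signal_info = {VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO};
//   signal_info.semaphore = timeline_sem;  // must be a VK_SEMAPHORE_TYPE_TIMELINE semaphore
//   signal_info.value = current_value + 1;
//   vkSignalSemaphore(device, &signal_info);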
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
bool skip = false;
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node) {
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined);
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |=
LogError(fence, vuid, "%s: Fence %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str());
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448",
"vkImportFenceWin32HandleKHR()");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()");
}
static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) {
auto result = LvlInitStruct<VkImageCreateInfo>();
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) {
result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
}
result.imageType = VK_IMAGE_TYPE_2D;
result.format = pCreateInfo->imageFormat;
result.extent.width = pCreateInfo->imageExtent.width;
result.extent.height = pCreateInfo->imageExtent.height;
result.extent.depth = 1;
result.mipLevels = 1;
result.arrayLayers = pCreateInfo->imageArrayLayers;
result.samples = VK_SAMPLE_COUNT_1_BIT;
result.tiling = VK_IMAGE_TILING_OPTIMAL;
result.usage = pCreateInfo->imageUsage;
result.sharingMode = pCreateInfo->imageSharingMode;
result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount;
result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices;
result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
return result;
}
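// A minimal sketch (not part of the layer) of querying surface capabilities and clamping
// minImageCount before swapchain creation, the bounds ValidateCreateSwapchain enforces below;
// "phys_dev" and "surface" are hypothetical handles.
//
//   VkSurfaceCapabilitiesKHR caps{};
//   vkGetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev, surface, &caps);
//   uint32_t image_count = caps.minImageCount + 1;
//   if (caps.maxImageCount > 0 && image_count > caps.maxImageCount) image_count = caps.maxImageCount;
//   VkSwapchainCreateInfoKHR info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR};
//   info.surface = surface;
//   info.minImageCount = image_count;
//   info.imageExtent = caps.currentExtent;      // must lie within min/maxImageExtent
//   info.preTransform = caps.currentTransform;  // must be a supported transform bit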
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name)) {
return true;
}
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) {
return true;
}
}
if (old_swapchain_state->retired) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name)) {
return true;
}
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
"%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height)) {
return true;
}
}
auto physical_device_state = GetPhysicalDeviceState();
bool skip = false;
VkSurfaceTransformFlagBitsKHR current_transform = physical_device_state->surfaceCapabilities.currentTransform;
if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) {
skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(current_transform));
}
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) {
return true;
}
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) {
return true;
}
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
const char *validation_error = "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276";
if ((IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) == true) &&
((pCreateInfo->presentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR))) {
validation_error = "VUID-VkSwapchainCreateInfoKHR-presentMode-01427";
}
if (LogError(device, validation_error,
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) {
return true;
}
}
if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surface_info.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surface_capabilities.pNext = &surface_protected_capabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surface_info, &surface_capabilities);
if (!surface_protected_capabilities.supportsProtected) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name)) {
return true;
}
}
}
std::vector<VkSurfaceFormatKHR> surface_formats;
const auto *surface_formats_ref = &surface_formats;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
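    // If the app never queried the surface formats, nothing is cached in the physical-device state; fall back to
    // querying the driver directly so validation can still proceed.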
if (physical_device_state->surface_formats.empty()) {
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count,
&surface_formats[0]);
} else {
surface_formats_ref = &physical_device_state->surface_formats;
}
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool found_format = false;
bool found_color_space = false;
bool found_match = false;
for (const auto &format : *surface_formats_ref) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
found_format = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_match = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_color_space = true;
}
}
}
if (!found_match) {
if (!found_format) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if (!found_color_space) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name,
string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) {
return true;
}
}
}
}
std::vector<VkPresentModeKHR> present_modes;
const auto *present_modes_ref = &present_modes;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
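    // As with surface formats above, fall back to querying the driver when the app never cached the present modes.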
if (physical_device_state->present_modes.empty()) {
uint32_t present_mode_count = 0;
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, nullptr);
present_modes.resize(present_mode_count);
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, &present_modes[0]);
} else {
present_modes_ref = &physical_device_state->present_modes;
}
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool found_match =
std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end();
if (!found_match) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
"%s called with a non-supported presentMode (i.e. %s).", func_name,
string_VkPresentModeKHR(pCreateInfo->presentMode))) {
return true;
}
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!device_extensions.vk_khr_shared_presentable_image) {
if (LogError(
device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) {
return true;
}
} else if (pCreateInfo->minImageCount != 1) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount)) {
return true;
}
}
}
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
        bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
                                                         func_name, "pCreateInfo->pQueueFamilyIndices",
                                                         "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
if (skip1) return true;
}
// Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures;
if (tiling_features == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
"physical device.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
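    // Reconstruct the VkImageCreateInfo the implementation would use for the swapchain's images, then ask the driver
    // whether such an image could actually be created with those parameters.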
const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo);
VkImageFormatProperties image_properties = {};
const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties(
physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage,
image_create_info.flags, &image_properties);
if (image_properties_result != VK_SUCCESS) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, "
"when called for %s validation with following params: "
"format: %s, imageType: %s, "
"tiling: %s, usage: %s, "
"flags: %s.",
func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType),
string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(),
string_VkImageCreateFlags(image_create_info.flags).c_str())) {
return true;
}
}
// Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers
if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with a non-supported imageArrayLayers (i.e. %d). "
"Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent
if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) ||
(pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)"
"returned by vkGetPhysicalDeviceImageFormatProperties(): "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width,
image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) &&
device_group_create_info.physicalDeviceCount == 1) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429",
"%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR"
"but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1",
func_name)) {
return true;
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
const auto surface_state = GetSurfaceState(pCreateInfo->surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (swapchain) {
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
if (!swapchain_image.image_state) continue;
imageLayoutMap.erase(swapchain_image.image_state->image);
qfo_release_image_barrier_map.erase(swapchain_image.image_state->image);
}
}
}
StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) const {
auto swapchain_state = GetSwapchainState(swapchain);
bool skip = false;
if (swapchain_state && pSwapchainImages) {
if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
skip |=
LogError(device, kVUID_Core_Swapchain_InvalidCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with pSwapchainImageCount set to a "
"value (%d) that is greater than the value (%d) that was returned when pSwapchainImages was NULL.",
*pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
}
return skip;
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages, VkResult result) {
    // This function runs twice: the first call retrieves pSwapchainImageCount, the second retrieves pSwapchainImages.
    // On the first call, StateTracker::PostCallRecordGetSwapchainImagesKHR only sizes the image container; on the
    // second call it creates the VkImage handles and their IMAGE_STATE entries. Therefore, recording the new
    // IMAGE_STATEs into the global image layout map must happen on the second call (pSwapchainImages != nullptr),
    // after StateTracker::PostCallRecordGetSwapchainImagesKHR has run.
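    // Scan for the first image index that has no tracked state yet; every index from there on corresponds to an image
    // created by this call, and only those need their initial layouts recorded below.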
uint32_t new_swapchain_image_index = 0;
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
auto swapchain_state = GetSwapchainState(swapchain);
const auto image_vector_size = swapchain_state->images.size();
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
if ((new_swapchain_image_index >= image_vector_size) ||
!swapchain_state->images[new_swapchain_image_index].image_state) {
break;
            }
}
}
StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
}
}
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
bool skip = false;
const auto queue_state = GetQueueState(queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
const auto semaphore_state = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
"vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY", i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
if (semaphore_state && !semaphore_state->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
LogObjectList objlist(queue);
objlist.add(pPresentInfo->pWaitSemaphores[i]);
skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
"vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(), i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
// VU currently is 2-in-1, covers being a valid index and valid layout
const char *validation_error = (device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkPresentInfoKHR-pImageIndices-01430"
: "VUID-VkPresentInfoKHR-pImageIndices-01296";
// Check if index is even possible to be acquired to give better error message
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], validation_error,
"vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in this swapchain.",
i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size()));
} else {
const auto *image_state = swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state;
assert(image_state);
if (!image_state->acquired) {
skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
"vkQueuePresentKHR: pSwapchains[%u] image index %u has not been acquired.", i,
pPresentInfo->pImageIndices[i]);
}
vector<VkImageLayout> layouts;
if (FindLayouts(*image_state, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |= LogError(queue, validation_error,
"vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
i, string_VkImageLayout(layout));
}
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR",
i);
} else if (!support_it->second) {
skip |= LogError(
pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
}
}
}
}
if (pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
// Swap offsets and extents for 90 or 270 degree preTransform rotation
if (swapchain_data->createInfo.preTransform &
(VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR | VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR)) {
std::swap(rect.offset.x, rect.offset.y);
std::swap(rect.extent.width, rect.extent.height);
}
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |=
LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
"the sum of offset.x (%i) and extent.width (%i) after applying preTransform (%s) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width,
string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |=
LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
"the sum of offset.y (%i) and extent.height (%i) after applying preTransform (%s) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height,
string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
swapchain_data->createInfo.imageExtent.height);
}
                    if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
                        skip |= LogError(
                            pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262",
                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
                            "(%i) must be less than the corresponding swapchain's imageArrayLayers (%i).",
                            i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchains) const {
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
}
}
return skip;
}
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain,
uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
const char *func_name, const char *semaphore_type_vuid) const {
bool skip = false;
auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name,
report_data->FormatHandle(semaphore).c_str());
}
if (semaphore_state && semaphore_state->scope == kSyncScopeInternal && semaphore_state->signaled) {
skip |= LogError(semaphore, "VUID-vkAcquireNextImageKHR-semaphore-01286",
"%s: Semaphore must not be currently signaled or in a wait state.", func_name);
}
auto fence_state = GetFenceState(fence);
if (fence_state) {
skip |= ValidateFenceForSubmit(fence_state, "VUID-vkAcquireNextImageKHR-fence-01287",
"VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()");
}
const auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
if (swapchain_data->retired) {
skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-swapchain-01285",
"%s: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.",
func_name);
}
auto physical_device_state = GetPhysicalDeviceState();
// TODO: this is technically wrong on many levels, but requires massive cleanup
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) {
const uint32_t acquired_images = static_cast<uint32_t>(
std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[](const SWAPCHAIN_IMAGE &image) { return (image.image_state && image.image_state->acquired); }));
const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size());
const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount;
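            // With N presentable images and a required minimum of M, the application can safely hold at most N - M
            // acquired images; acquiring another with an infinite timeout may never return.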
const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count;
if (timeout == UINT64_MAX && too_many_already_acquired) {
const char *vuid = "INVALID-vuid";
if (cmd_version == CMD_VERSION_1) {
vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802";
} else if (cmd_version == CMD_VERSION_2) {
vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803";
} else {
assert(false);
}
const uint32_t acquirable = swapchain_image_count - min_image_count + 1;
skip |= LogError(swapchain, vuid,
"%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32
" %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32
", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").",
func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable,
acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const {
return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265");
}
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) const {
bool skip = false;
skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR",
"VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
return skip;
}
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) const {
const auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) const {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const {
bool skip = false;
const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str());
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) ||
(bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV);
if (!valid_bp) {
skip |=
LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
} else {
const uint32_t pd_set = pCreateInfo->set;
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
return skip;
}
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
bool skip = false;
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
// Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
// but retaining the assert as template support is new enough to want to investigate these in debug builds.
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set,
const void *pData) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);
const auto layout_data = GetPipelineLayout(layout);
const auto dsl = GetDslFromPipelineLayout(layout_data, set);
// Validate the set index points to a push descriptor set and is in range
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
report_data->FormatHandle(layout).c_str());
}
} else if (layout_data && (set >= layout_data->set_layouts.size())) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
}
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
const auto &template_ci = template_state->create_info;
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_TemplateType,
"%s: descriptorUpdateTemplate %s was not created with flag "
"VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
}
if (template_ci.set != set) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
"%s: descriptorUpdateTemplate %s created with set %" PRIu32
" does not match command parameter set %" PRIu32 ".",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
}
if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(descriptorUpdateTemplate);
objlist.add(template_ci.pipelineLayout);
objlist.add(layout);
skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
"%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
"%s for set %" PRIu32,
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
report_data->FormatHandle(layout).c_str(), set);
}
}
if (dsl && template_state) {
// Create an empty proxy in order to use the existing descriptor set update validation
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
// Validate the decoded update against the proxy_ds
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data(), func_name);
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) const {
bool skip = false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex (%u) must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, planeIndex, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex,
VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
"vkGetDisplayPlaneCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface) const {
bool skip = false;
const VkDisplayModeKHR display_mode = pCreateInfo->displayMode;
const uint32_t plane_index = pCreateInfo->planeIndex;
if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) {
const float global_alpha = pCreateInfo->globalAlpha;
if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) {
skip |= LogError(
display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254",
"vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.",
global_alpha);
}
}
const DISPLAY_MODE_STATE *dm_state = GetDisplayModeState(display_mode);
if (dm_state != nullptr) {
// Get physical device from VkDisplayModeKHR state tracking
const VkPhysicalDevice physical_device = dm_state->physical_device;
const auto physical_device_state = GetPhysicalDeviceState(physical_device);
VkPhysicalDeviceProperties device_properties = {};
DispatchGetPhysicalDeviceProperties(physical_device, &device_properties);
const uint32_t width = pCreateInfo->imageExtent.width;
const uint32_t height = pCreateInfo->imageExtent.height;
if (width >= device_properties.limits.maxImageDimension2D) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
"vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32
") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
width, device_properties.limits.maxImageDimension2D);
}
if (height >= device_properties.limits.maxImageDimension2D) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
"vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32
") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
height, device_properties.limits.maxImageDimension2D);
}
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (plane_index >= physical_device_state->display_plane_property_count) {
skip |=
LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252",
"vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
plane_index, physical_device_state->display_plane_property_count - 1);
            } else {
                // Only query the plane capabilities once the plane index is known to be valid
VkDisplayPlaneCapabilitiesKHR plane_capabilities;
DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities);
if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255",
"vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) "
"does not support the mode.",
string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index,
plane_capabilities.supportedAlpha);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, query, index);
const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
struct BeginQueryIndexedVuids : ValidateBeginQueryVuids {
BeginQueryIndexedVuids() : ValidateBeginQueryVuids() {
vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool";
vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338";
vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803";
vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800";
vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802";
vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223";
vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224";
vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225";
vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-04753";
vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885";
}
};
BeginQueryIndexedVuids vuids;
bool skip = ValidateBeginQuery(cb_state, query_obj, flags, index, CMD_BEGINQUERYINDEXEDEXT, cmd_name, &vuids);
// Extension specific VU's
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (device_extensions.vk_ext_transform_feedback &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= LogError(
cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
"%s: index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
"%s: index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
cmd_name, index, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, query, index};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()");
}
void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, query, index};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) const {
if (disabled[query_validation]) return false;
QueryObject query_obj = {queryPool, query, index};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
struct EndQueryIndexedVuids : ValidateEndQueryVuids {
EndQueryIndexedVuids() : ValidateEndQueryVuids() {
vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool";
vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342";
vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344";
}
};
EndQueryIndexedVuids vuids;
return ValidateCmdEndQuery(cb_state, query_obj, index, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", &vuids);
}
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount,
const VkRect2D *pDiscardRectangles) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
// Minimal validation for command buffer state
skip |= ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetDiscardRectangleEXT-viewportScissor2D-04788",
"vkCmdSetDiscardRectangleEXT");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT");
const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS);
const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state;
if (pipe != nullptr) {
// Check same error with different log messages
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->graphicsPipelineCI.pMultisampleState;
if (multisample_state == nullptr) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to "
"rasterizationSamples, but the bound graphics pipeline was created without a multisample state");
} else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to "
"the last bound pipeline's rasterizationSamples (%s)",
string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
return skip;
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
const VkSamplerYcbcrConversionCreateInfo *create_info) const {
bool skip = false;
const VkFormat conversion_format = create_info->format;
// Need to check for external format conversion first as it allows for non-UNORM format
bool external_format = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
external_format = true;
if (VK_FORMAT_UNDEFINED != create_info->format) {
return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"%s: CreateInfo format is not VK_FORMAT_UNDEFINED while "
"there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.",
func_name);
}
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061"
: "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060";
skip |=
LogError(device, vuid,
"%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.",
func_name, string_VkFormat(conversion_format));
}
// Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features
// (vkspec.html#potential-format-features)
VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
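    // Start with every feature bit set; the cases below narrow this to the actual features of the format (or of the
    // external Android format, when one is supplied).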
if (conversion_format == VK_FORMAT_UNDEFINED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format
// features being supported
if (external_format == true) {
auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
format_features = it->second;
}
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
} else {
format_features = GetPotentialFormatFeatures(conversion_format);
}
// Check all VUID that are based off of VkFormatFeatureFlags
// These can't be in StatelessValidation due to needing possible External AHB state for feature support
if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) &&
((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650",
"%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or "
"VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT",
func_name, string_VkFormat(conversion_format));
}
if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
}
if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) &&
(create_info->forceExplicitReconstruction == VK_TRUE)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656",
"%s: Format %s does not support "
"VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so "
"forceExplicitReconstruction must be VK_FALSE",
func_name, string_VkFormat(conversion_format));
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) &&
(create_info->chromaFilter == VK_FILTER_LINEAR)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657",
"%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so "
"chromaFilter must not be VK_FILTER_LINEAR",
func_name, string_VkFormat(conversion_format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const {
bool skip = false;
if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) {
skip |= LogError(
device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110",
"vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).",
samplerMap.size(), phys_dev_props.limits.maxSamplerAllocationCount);
}
if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) {
const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext);
if (conversion_info != nullptr) {
const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion;
const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion);
if ((ycbcr_state->format_features &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0) {
const VkFilter chroma_filter = ycbcr_state->chromaFilter;
                if (pCreateInfo->minFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
                }
                if (pCreateInfo->magFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so magFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->magFilter), string_VkFilter(chroma_filter));
                }
}
// At this point there is a known sampler YCbCr conversion enabled
const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
if (sampler_reduction != nullptr) {
if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
"A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
"must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
}
}
}
}
if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
if (!enabled_features.custom_border_color_features.customBorderColors) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
"vkCreateSampler(): A custom border color was specified without enabling the custom border color feature");
}
auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
if (custom_create_info) {
if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
!enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
"vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the "
"customBorderColorWithoutFormat feature being enabled");
}
}
if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
"vkCreateSampler(): Creating a sampler with a custom border color will exceed the "
"maxCustomBorderColorSamplers limit of %d",
phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
}
}
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467",
"vkCreateSampler (portability error): mip LOD bias not supported.");
}
}
return skip;
}
bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324",
"%s: The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice &&
!enabled_features.buffer_device_address_ext.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325",
"%s: If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
const auto buffer_state = GetBufferState(pInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600");
}
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true,
"VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName,
"VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT");
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddressEXT");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddress");
}
bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326",
"%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327",
"%s(): If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferOpaqueCaptureAddress");
}
bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334",
"%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335",
"%s(): If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
if (mem_info) {
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) {
skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336",
"%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo),
"vkGetDeviceMemoryOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo),
"vkGetDeviceMemoryOpaqueCaptureAddress");
}
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange,
const char *apiName) const {
bool skip = false;
if (firstQuery >= totalCount) {
skip |= LogError(device, vuid_badfirst,
"%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName,
firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str());
}
if ((firstQuery + queryCount) > totalCount) {
skip |= LogError(device, vuid_badrange,
"%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName,
firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
const char *apiName) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (!enabled_features.core12.hostQueryReset) {
skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not enabled for device", apiName);
}
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
"VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName);
}
return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPoolEXT");
}
bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool");
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete CastFromHandle<ValidationCache *>(validationCache);
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t in_size = *pDataSize;
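// Two-call idiom: a null pData makes Write() report only the required size; otherwise Write() updates
// *pDataSize to the bytes actually written, and a shrunken size is reported to the caller as VK_INCOMPLETE.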
CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != in_size) ? VK_INCOMPLETE : VK_SUCCESS;
}
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
bool skip = false;
auto dst = CastFromHandle<ValidationCache *>(dstCache);
VkResult result = VK_SUCCESS;
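// dstCache must not appear in pSrcCaches; a self-merge is flagged as an error and the merge is skipped.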
for (uint32_t i = 0; i < srcCacheCount; i++) {
auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
if (src == dst) {
skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src);
}
}
return result;
}
bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
if (cb_state->activeRenderPass) {
skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMask()");
}
bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMaskKHR()");
}
bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue,
const char *apiName) const {
bool skip = false;
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255",
"%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName,
report_data->FormatHandle(semaphore).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR");
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue");
}
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
if (flags & VK_QUERY_RESULT_64_BIT) {
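// 64-bit results: stride and the offset-style parameter must be multiples of 8 (low three bits clear)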
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
} else {
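// 32-bit results: stride and the offset-style parameter must be multiples of 4 (low two bits clear)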
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size) const {
bool skip = false;
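// Indirect-draw stride must be a multiple of 4 and at least as large as the indirect command struct it steps over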
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (stride < struct_size)) {
skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size);
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
bool skip = false;
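// Byte offset just past the last indirect struct the GPU will read; it must lie within the bound buffer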
uint64_t validation_value = static_cast<uint64_t>(stride) * (drawCount - 1) + offset + struct_size;
if (validation_value > buffer_state->createInfo.size) {
skip |= LogError(commandBuffer, vuid,
"stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
" is greater than the size[%" PRIx64 "] of %s.",
stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const {
bool skip = false;
if (!performance_lock_acquired) {
skip |= LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235",
"vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful "
"call to vkAcquireProfilingLockKHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV, "vkCmdSetCheckpointNV()");
return skip;
}
bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, size_t dataSize, void *pData,
size_t stride) const {
bool skip = false;
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
const auto &as_info = as_state->build_info_khr;
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in "
"pAccelerationStructures must have been built with"
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.",
report_data->FormatHandle(as_state->acceleration_structure).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR(
VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR, "vkCmdWriteAccelerationStructuresPropertiesKHR()");
const auto *query_pool_state = GetQueryPoolState(queryPool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != queryType) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493",
"vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType.");
}
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
if (!(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures "
"must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.");
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer,
uint32_t accelerationStructureCount,
const VkAccelerationStructureNV *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool,
uint32_t firstQuery) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV, "vkCmdWriteAccelerationStructuresPropertiesNV()");
const auto *query_pool_state = GetQueryPoolState(queryPool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != queryType) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755",
"vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType.");
}
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pAccelerationStructures[i]);
if (!(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |=
LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-accelerationStructures-03431",
"vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in pAccelerationStructures "
"must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.");
}
}
}
return skip;
}
uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const {
uint32_t total = pipelineState->raytracingPipelineCI.groupCount;
if (pipelineState->raytracingPipelineCI.pLibraryInfo) {
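// Shader groups contributed by linked pipeline libraries count toward the total, so recurse into each library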
for (uint32_t i = 0; i < pipelineState->raytracingPipelineCI.pLibraryInfo->libraryCount; ++i) {
const PIPELINE_STATE *library_pipeline_state =
GetPipelineState(pipelineState->raytracingPipelineCI.pLibraryInfo->pLibraries[i]);
total += CalcTotalShaderGroupCount(library_pipeline_state);
}
}
return total;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
uint32_t groupCount, size_t dataSize, void *pData) const {
bool skip = false;
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482",
"vkGetRayTracingShaderGroupHandlesKHR: pipeline must have not been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.");
}
if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) {
skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420",
"vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.",
dataSize);
}
uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state);
if (firstGroup >= total_group_count) {
skip |=
LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050",
"vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline.");
}
if ((firstGroup + groupCount) > total_group_count) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419",
"vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal the number "
"of shader groups in pipeline.");
}
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline,
uint32_t firstGroup, uint32_t groupCount,
size_t dataSize, void *pData) const {
bool skip = false;
if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.",
dataSize);
}
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (!pipeline_state) {
return skip;
}
if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader "
"groups in pipeline.");
}
if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less "
"than or equal to the number of shader groups in pipeline.");
}
if (!(pipeline_state->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607",
"pipeline must have been created with a flags that included "
"VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkDeviceAddress *pIndirectDeviceAddresses,
const uint32_t *pIndirectStrides,
const uint32_t *const *ppMaxPrimitiveCounts) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, "vkCmdBuildAccelerationStructuresIndirectKHR()");
for (uint32_t i = 0; i < infoCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
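// An update build must target a source AS built with ALLOW_UPDATE and must reuse its geometryCount, flags, and type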
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
"been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
if (pInfos[i].flags != src_as_state->build_info_khr.flags) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
if (pInfos[i].type != src_as_state->build_info_khr.type) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699",
"vkCmdBuildAccelerationStructuresIndirectKHR():For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
}
return skip;
}
bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo,
const char *api_name) const {
bool skip = false;
if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfo->src);
if (!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411",
"(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR"
"if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.",
api_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR, "vkCmdCopyAccelerationStructureKHR()");
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
bool skip = false;
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR(
VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR, "vkCmdCopyAccelerationStructureToMemoryKHR()");
const auto *accel_state = GetAccelerationStructureStateKHR(pInfo->src);
if (accel_state) {
const auto *buffer_state = GetBufferState(accel_state->create_infoKHR.buffer);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdCopyAccelerationStructureToMemoryKHR",
"VUID-vkCmdCopyAccelerationStructureToMemoryKHR-None-03559");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR(
VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR, "vkCmdCopyMemoryToAccelerationStructureKHR()");
return skip;
}
bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const {
bool skip = false;
char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365",
"%s: transform feedback is active.", cmd_name);
}
}
for (uint32_t i = 0; i < bindingCount; ++i) {
auto const buffer_state = GetBufferState(pBuffers[i]);
assert(buffer_state != nullptr);
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358",
"%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360",
"%s: pBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.",
cmd_name, i, pBuffers[i]);
}
// pSizes is optional and may be nullptr. An entry may also be VK_WHOLE_SIZE, in which case these VUs do not apply
if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
// Only report one error: if the size alone is too large, the offset + size check would fail as well and be redundant
if (pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362",
"%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32
"](0x%" PRIxLEAST64 ").",
cmd_name, i, pSizes[i], i, buffer_state->createInfo.size);
} else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363",
"%s: The sum of pOffsets[%" PRIu32 "](Ox%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size);
}
}
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdBeginTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367", "%s: transform feedback is active.",
cmd_name);
}
}
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370",
"%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372",
"%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, pCounterBuffers[i]);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdEndTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (!cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.",
cmd_name);
}
}
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378",
"%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380",
"%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, pCounterBuffers[i]);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLOGICOPEXT, "vkCmdSetLogicOpEXT()");
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetLogicOpEXT-None-04867",
"vkCmdSetLogicOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPATCHCONTROLPOINTSEXT, "vkCmdSetPatchControlPointsEXT()");
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-None-04873",
"vkCmdSetPatchControlPointsEXT: extendedDynamicState feature is not enabled.");
}
if (patchControlPoints > phys_dev_props.limits.maxTessellationPatchSize) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-patchControlPoints-04874",
"vkCmdSetPatchControlPointsEXT: The value of patchControlPoints must be less than "
"VkPhysicalDeviceLimits::maxTessellationPatchSize");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetRasterizerDiscardEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 rasterizerDiscardEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETRASTERIZERDISCARDENABLEEXT, "vkCmdSetRasterizerDiscardEnableEXT()");
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetRasterizerDiscardEnableEXT-None-04871",
"vkCmdSetRasterizerDiscardEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBiasEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIASENABLEEXT, "vkCmdSetDepthBiasEnableEXT()");
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBiasEnableEXT-None-04872",
"vkCmdSetDepthBiasEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveRestartEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 primitiveRestartEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVERESTARTENABLEEXT, "vkCmdSetPrimitiveRestartEnableEXT()");
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveRestartEnableEXT-None-04866",
"vkCmdSetPrimitiveRestartEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT, "vkCmdSetCullModeEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384",
"vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT, "vkCmdSetFrontFaceEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383",
"vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer,
VkPrimitiveTopology primitiveTopology) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT, "vkCmdSetPrimitiveTopologyEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347",
"vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT, "vkCmdSetViewportWithCountEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393",
"vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled.");
}
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-04819",
"vkCmdSetViewportWithCountEXT");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT, "vkCmdSetScissorWithCountEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396",
"vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled.");
}
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-04820",
"vkCmdSetScissorWithCountEXT");
return skip;
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT, "vkCmdBindVertexBuffers2EXT()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()",
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357",
"vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
pOffsets[i]);
}
if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358",
"vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT, "vkCmdSetDepthTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352",
"vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT, "vkCmdSetDepthWriteEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354",
"vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT, "vkCmdSetDepthCompareOpEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353",
"vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 depthBoundsTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT, "vkCmdSetDepthBoundsTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349",
"vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT, "vkCmdSetStencilTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350",
"vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp,
VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT, "vkCmdSetStencilOpEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351",
"vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const {
bool skip = false;
if (device_extensions.vk_khr_portability_subset != ExtEnabled::kNotEnabled) {
if (VK_FALSE == enabled_features.portability_subset_features.events) {
skip |= LogError(device, "VUID-vkCreateEvent-events-04468",
"vkCreateEvent: events are not supported via VK_KHR_portability_subset");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer,
uint32_t pipelineStackSize) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR, "vkCmdSetRayTracingPipelineStackSizeKHR()");
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group,
VkShaderGroupShaderKHR groupShader) const {
bool skip = false;
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (group >= pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608",
"vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups "
"in pipeline.");
}
return skip;
}
void PIPELINE_STATE::initGraphicsPipeline(const ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
std::shared_ptr<const RENDER_PASS_STATE> &&rpstate) {
reset();
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
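// Scan the targeted subpass to determine whether any color or depth/stencil attachment is actually used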
const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];
for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
uses_color_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uses_depthstencil_attachment = true;
}
}
graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
if (graphicsPipelineCI.pInputAssemblyState) {
topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
}
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pssci = &pCreateInfo->pStages[i];
this->duplicate_shaders |= this->active_shaders & pssci->stage;
this->active_shaders |= pssci->stage;
state_data->RecordPipelineShaderStage(pssci, this, &stage_state[i]);
}
if (graphicsPipelineCI.pVertexInputState) {
const auto vici = graphicsPipelineCI.pVertexInputState;
if (vici->vertexBindingDescriptionCount) {
this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
vici->pVertexBindingDescriptions, vici->pVertexBindingDescriptions + vici->vertexBindingDescriptionCount);
this->vertex_binding_to_index_map_.reserve(vici->vertexBindingDescriptionCount);
for (uint32_t i = 0; i < vici->vertexBindingDescriptionCount; ++i) {
this->vertex_binding_to_index_map_[vici->pVertexBindingDescriptions[i].binding] = i;
}
}
if (vici->vertexAttributeDescriptionCount) {
this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
vici->pVertexAttributeDescriptions, vici->pVertexAttributeDescriptions + vici->vertexAttributeDescriptionCount);
for (uint32_t i = 0; i < vici->vertexAttributeDescriptionCount; ++i) {
const auto attribute_format = vici->pVertexAttributeDescriptions[i].format;
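// The required attribute alignment is the format's element size; texel formats align per channel instead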
VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format);
if (FormatElementIsTexel(attribute_format)) {
vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format));
}
this->vertex_attribute_alignments_.push_back(vtx_attrib_req_alignment);
}
}
}
if (graphicsPipelineCI.pColorBlendState) {
const auto cbci = graphicsPipelineCI.pColorBlendState;
if (cbci->attachmentCount) {
this->attachments =
std::vector<VkPipelineColorBlendAttachmentState>(cbci->pAttachments, cbci->pAttachments + cbci->attachmentCount);
}
}
rp_state = rpstate;
}
void PIPELINE_STATE::initComputePipeline(const ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) {
reset();
computePipelineCI.initialize(pCreateInfo);
switch (computePipelineCI.stage.stage) {
case VK_SHADER_STAGE_COMPUTE_BIT:
this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
stage_state.resize(1);
state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]);
break;
default:
// TODO : Flag error
break;
}
}
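// Both the NV and KHR ray tracing create-info structs expose stageCount/pStages, so one template covers both;
// the explicit instantiations appear after the definition.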
template <typename CreateInfo>
void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, const CreateInfo *pCreateInfo) {
reset();
raytracingPipelineCI.initialize(pCreateInfo);
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) {
const auto &shader_stage = pCreateInfo->pStages[stage_index];
switch (shader_stage.stage) {
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
break;
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_MISS_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
break;
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
break;
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
break;
default:
// TODO : Flag error
break;
}
state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]);
}
}
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoNV *pCreateInfo);
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo);
bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize,
const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETFRAGMENTSHADINGRATEKHR, cmd_name);
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
skip |= LogError(
cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509",
"vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.",
cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507",
"vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pFragmentSize->width, cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508",
"vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pFragmentSize->height, cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510",
"vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
"primitiveFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511",
"vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
"attachmentFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
"vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is "
"not supported",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
"vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps "
"is not supported",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
}
if (pFragmentSize->width == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513",
"vkCmdSetFragmentShadingRateKHR: Fragment width of %u has been specified in %s.", pFragmentSize->width,
cmd_name);
}
if (pFragmentSize->height == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514",
"vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height,
cmd_name);
}
if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515",
"vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.",
pFragmentSize->width, cmd_name);
}
if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516",
"vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.",
pFragmentSize->height, cmd_name);
}
if (pFragmentSize->width > 4) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517",
"vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width,
cmd_name);
}
if (pFragmentSize->height > 4) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518",
"vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large",
pFragmentSize->height, cmd_name);
}
return skip;
}
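// Illustrative sketch (not part of the validation layer): with just the
// pipelineFragmentShadingRate feature enabled, the following call satisfies
// every check above: a non-zero, power-of-two fragment size no larger than
// 4x4, and KEEP combiner ops, which need neither the primitive/attachment
// features nor fragmentShadingRateNonTrivialCombinerOps. `command_buffer` is
// an assumed, already-recording handle.
//
//   VkExtent2D fragment_size = {2u, 2u};
//   const VkFragmentShadingRateCombinerOpKHR combiner_ops[2] = {
//       VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
//       VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR};
//   vkCmdSetFragmentShadingRateKHR(command_buffer, &fragment_size, combiner_ops);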
| 1 | 16,153 | I think these got refactored to `LvlFindInChain` to better conform with the coding guidelines. I think MarkL left these in for backwards compatibility. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -50,6 +50,7 @@ def _column_op(f):
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
+
@wraps(f)
def wrapper(self, *args):
assert all((not isinstance(arg, Series)) or (arg._kdf is self._kdf) for arg in args), \ | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
import inspect
from functools import partial, wraps
from typing import Any
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType, FloatType, DoubleType, LongType, StringType, \
StructType, TimestampType, to_arrow_type
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.selection import SparkDataFrameLocator
from databricks.koalas.utils import validate_arguments_and_invoke_function
def _column_op(f):
"""
A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas
Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self, *args):
assert all((not isinstance(arg, Series)) or (arg._kdf is self._kdf) for arg in args), \
"Cannot combine column argument because it comes from a different dataframe"
# It is possible for the function `f` takes other arguments than Spark Column.
# To cover this case, explicitly check if the argument is Koalas Series and
# extract Spark Column. For other arguments, they are used as are.
args = [arg._scol if isinstance(arg, Series) else arg for arg in args]
scol = f(self._scol, *args)
return Series(scol, anchor=self._kdf, index=self._index_map)
return wrapper
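# Illustrative sketch (not part of the original module): `_column_op` can lift
# any Spark-Column-level function, not only the dunders assigned in the class
# body below. PySpark's `Column.startswith` is a real method; the helper name
# here is hypothetical.
def _example_startswith(series, prefix):
    """Hypothetical: lift `Column.startswith` to a boolean Koalas Series."""
    return _column_op(spark.Column.startswith)(series, prefix)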
def _numpy_column_op(f):
@wraps(f)
def wrapper(self, *args):
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, 's')))
else:
new_args.append(arg)
return _column_op(f)(self, *new_args)
return wrapper
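# Illustrative note (not in the original module): the conversion above turns a
# NumPy timedelta into plain seconds before Spark sees it, e.g.
#   float(np.timedelta64(1, 'h') / np.timedelta64(1, 's'))  ->  3600.0
# so `bigint_series - np.timedelta64(1, 'h')` is computed as `- 3600.0`.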
class Series(_Frame):
"""
    Koalas Series that corresponds to Pandas Series logically. This holds Spark Column
internally.
:ivar _scol: Spark Column instance
:type _scol: pyspark.Column
:ivar _kdf: Parent's Koalas DataFrame
:type _kdf: ks.DataFrame
:ivar _index_map: Each pair holds the index field name which exists in Spark fields,
and the index name.
Parameters
----------
data : array-like, dict, or scalar value, Pandas Series or Spark Column
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas Series, other arguments should not be used.
If `data` is a Spark Column, all other arguments except `index` should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
    If `data` is a Spark DataFrame, `index` is expected to be `Metadata`'s `index_map`.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False,
anchor=None):
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert anchor is None
assert not fastpath
self._init_from_pandas(data)
elif isinstance(data, spark.Column):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._init_from_spark(data, anchor, index)
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
self._init_from_pandas(s)
def _init_from_pandas(self, s):
"""
Creates Koalas Series from Pandas Series.
:param s: Pandas Series
"""
kdf = DataFrame(pd.DataFrame(s))
self._init_from_spark(kdf._sdf[kdf._metadata.data_columns[0]],
kdf, kdf._metadata.index_map)
def _init_from_spark(self, scol, kdf, index_map):
"""
Creates Koalas Series from Spark Column.
:param scol: Spark Column
:param kdf: Koalas DataFrame that should have the `scol`.
:param index_map: index information of this Series.
"""
assert index_map is not None
assert kdf is not None
assert isinstance(kdf, ks.DataFrame), type(kdf)
self._scol = scol
self._kdf = kdf
self._index_map = index_map
# arithmetic operators
__neg__ = _column_op(spark.Column.__neg__)
def __add__(self, other):
if isinstance(self.spark_type, StringType):
# Concatenate string columns
if isinstance(other, Series) and isinstance(other.spark_type, StringType):
return _column_op(F.concat)(self, other)
# Handle df['col'] + 'literal'
elif isinstance(other, str):
return _column_op(F.concat)(self, F.lit(other))
else:
raise TypeError('string addition can only be applied to string series or literals.')
else:
return _column_op(spark.Column.__add__)(self, other)
def __sub__(self, other):
        # Note that timestamp subtraction casts arguments to integer. This is to mimic Pandas'
        # behavior. Pandas returns 'timedelta64[ns]' from 'datetime64[ns]' subtraction.
if isinstance(other, Series) and isinstance(self.spark_type, TimestampType):
if not isinstance(other.spark_type, TimestampType):
raise TypeError('datetime subtraction can only be applied to datetime series.')
return self.astype('bigint') - other.astype('bigint')
else:
return _column_op(spark.Column.__sub__)(self, other)
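    # Illustrative note (not in the original module): since TimestampType casts
    # to 'bigint' as Unix seconds, `ts_a - ts_b` for two timestamp Series yields
    # the difference in whole seconds as an integer Series, an approximation of
    # the 'timedelta64[ns]' result Pandas would give.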
__mul__ = _column_op(spark.Column.__mul__)
__div__ = _numpy_column_op(spark.Column.__div__)
__truediv__ = _numpy_column_op(spark.Column.__truediv__)
__mod__ = _column_op(spark.Column.__mod__)
def __radd__(self, other):
# Handle 'literal' + df['col']
if isinstance(self.spark_type, StringType) and isinstance(other, str):
return Series(F.concat(F.lit(other), self._scol), anchor=self._kdf,
index=self._index_map)
else:
return _column_op(spark.Column.__radd__)(self, other)
__rsub__ = _column_op(spark.Column.__rsub__)
__rmul__ = _column_op(spark.Column.__rmul__)
__rdiv__ = _numpy_column_op(spark.Column.__rdiv__)
__rtruediv__ = _numpy_column_op(spark.Column.__rtruediv__)
__rmod__ = _column_op(spark.Column.__rmod__)
__pow__ = _column_op(spark.Column.__pow__)
__rpow__ = _column_op(spark.Column.__rpow__)
# logistic operators
__eq__ = _column_op(spark.Column.__eq__)
__ne__ = _column_op(spark.Column.__ne__)
__lt__ = _column_op(spark.Column.__lt__)
__le__ = _column_op(spark.Column.__le__)
__ge__ = _column_op(spark.Column.__ge__)
__gt__ = _column_op(spark.Column.__gt__)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _column_op(spark.Column.__and__)
__or__ = _column_op(spark.Column.__or__)
__invert__ = _column_op(spark.Column.__invert__)
__rand__ = _column_op(spark.Column.__rand__)
__ror__ = _column_op(spark.Column.__ror__)
@property
def dtype(self):
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ks.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ks.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
"""
if type(self.spark_type) == TimestampType:
return np.dtype('datetime64[ns]')
else:
return np.dtype(to_arrow_type(self.spark_type).to_pandas_dtype())
@property
def spark_type(self):
""" Returns the data type as defined by Spark, as a Spark DataType object."""
return self.schema.fields[-1].dataType
def astype(self, dtype):
from databricks.koalas.typedef import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return Series(self._scol.cast(spark_type), anchor=self._kdf, index=self._index_map)
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return Series(self._scol.getField(name), anchor=self._kdf, index=self._index_map)
def alias(self, name):
"""An alias for :meth:`Series.rename`."""
return self.rename(name)
@property
def schema(self):
return self.to_dataframe()._sdf.schema
@property
def shape(self):
"""Return a tuple of the shape of the underlying data."""
return len(self),
@property
def name(self):
return self._metadata.data_columns[0]
@name.setter
def name(self, name):
self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# taking dictionary and function to change index are not supported.
def rename(self, index=None, **kwargs):
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
Name: 0, dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
return self
scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._scol = scol
return self
else:
return Series(scol, anchor=self._kdf, index=self._index_map)
@property
def _metadata(self):
return self.to_dataframe()._metadata
@property
def index(self):
"""The index (axis labels) Column of the Series.
Currently supported only when the DataFrame has a single index.
"""
if len(self._metadata.index_map) != 1:
raise KeyError('Currently supported only when the Column has a single index.')
return self._kdf.index
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
"""
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
kdf = self.rename(name).to_dataframe()
else:
kdf = self.to_dataframe()
kdf = kdf.reset_index(level=level, drop=drop)
if drop:
s = _col(kdf)
if inplace:
self._kdf = kdf
self._scol = s._scol
self._index_map = s._index_map
else:
return s
else:
return kdf
@property
def loc(self):
return SparkDataFrameLocator(self)
def to_dataframe(self):
sdf = self._kdf._sdf.select([field for field, _ in self._index_map] + [self._scol])
metadata = Metadata(data_columns=[sdf.schema[-1].name], index_map=self._index_map)
return DataFrame(sdf, metadata)
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kseries = self.head(max_rows)
else:
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_string, pd.Series.to_string, args)
def to_clipboard(self, excel=True, sep=None, **kwargs):
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_clipboard, pd.Series.to_clipboard, args)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_dict, pd.Series.to_dict, args)
def to_pandas(self):
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return _col(self.to_dataframe().toPandas())
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def isnull(self):
"""
        Detect missing values.
        Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
>>> ser = ks.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
Name: ((0 IS NULL) OR isnan(0)), dtype: bool
"""
if isinstance(self.schema[self.name].dataType, (FloatType, DoubleType)):
return Series(self._scol.isNull() | F.isnan(self._scol), anchor=self._kdf,
index=self._index_map)
else:
return Series(self._scol.isNull(), anchor=self._kdf, index=self._index_map)
isna = isnull
def notnull(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
return ~self.isnull()
notna = notnull
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ks.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
Name: 0, dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
Name: 0, dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
Name: 0, dtype: float64
"""
# TODO: last two examples from Pandas produce different results.
ks = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
self._kdf = ks._kdf
self._scol = ks._scol
else:
return ks
def head(self, n=5):
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
n : Integer, default = 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return _col(self.to_dataframe().head(n))
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related with timestamps were not added.
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
        .. note:: This method returns a newly created Series whereas Pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Examples section.
Examples
--------
>>> ks.Series([2, 1, 3, 3], name='A').unique()
0 1
1 3
2 2
Name: A, dtype: int64
>>> ks.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
Name: 0, dtype: datetime64[ns]
"""
sdf = self.to_dataframe()._sdf
return _col(DataFrame(sdf.select(self._scol).distinct()))
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
>>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
"""
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._kdf._sdf.filter(self.notna()._scol)
else:
sdf_dropna = self._kdf._sdf
sdf = sdf_dropna.groupby(self._scol).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col('count'))
else:
sdf = sdf.orderBy(F.col('count').desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn('count', F.col('count') / F.lit(sum))
index_name = 'index' if self.name != 'index' else 'level_0'
kdf = DataFrame(sdf)
kdf.columns = [index_name, self.name]
kdf._metadata = Metadata(data_columns=[self.name], index_map=[(index_name, None)])
return _col(kdf)
def isin(self, values):
"""
Check whether `values` are contained in Series.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : list or set
The sequence of values to test.
Returns
-------
isin : Series (bool dtype)
Examples
--------
>>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
"""
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
return Series(self._scol.isin(list(values)).alias(self.name), anchor=self._kdf,
index=self._index_map)
def corr(self, other, method='pearson'):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ks.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]]
c = df.corr(method=method)
return c.loc["corr_arg1", "corr_arg2"]
def count(self):
"""
Return number of non-NA/null observations in the Series.
Returns
-------
nobs : int
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26]})
Notice the uncounted NA values:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def apply(self, func, args=(), **kwds):
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
        .. note:: unlike pandas, it is required for `func` to specify a return type hint.
Parameters
----------
func : function
Python function to apply. Note that type hint for return type is required.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ks.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
Name: 0, dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
Name: square(0), dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
Name: subtract_custom_value(0), dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
Name: add_custom_values(0), dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
Name: numpy_log(0), dtype: float64
"""
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
if return_sig is None:
raise ValueError("Given function must have return type hint; however, not found.")
apply_each = wraps(func)(lambda s, *a, **k: s.apply(func, args=a, **k))
wrapped = ks.pandas_wraps(return_col=return_sig)(apply_each)
return wrapped(self, *args, **kwds)
def _reduce_for_stat_function(self, sfun):
from inspect import signature
num_args = len(signature(sfun).parameters)
col_sdf = self._scol
col_type = self.schema[self.name].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
return _unpack_scalar(self._kdf._sdf.select(col_sdf))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return Series(self._scol.__getitem__(key), anchor=self._kdf, index=self._index_map)
def __getattr__(self, item: str) -> Any:
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
if hasattr(_MissingPandasLikeSeries, item):
property_or_func = getattr(_MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return self.getField(item)
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
return repr(self.head(max_display_count).to_pandas())
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Series, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._scol._jc.toString().encode('utf8')
def _unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
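# Illustrative note (not in the original module): `_unpack_scalar` is how the
# stat reductions above hand back plain Python values, e.g.
#   _unpack_scalar(sdf.select(F.count(scol)))  ->  4
# for a column with four non-null rows; it asserts both "single row" and
# "single column" before unpacking.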
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 9,214 | It's fine for this PR, but in the future try to minimize unrelated changes. | databricks-koalas | py |
@@ -202,11 +202,11 @@ class Ansible(base.Base):
::
ANSIBLE_ROLES_PATH:
- $ephemeral_directory/roles/:$project_directory/../
+ $ephemeral_directory/roles/:$project_directory/../:$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
ANSIBLE_LIBRARY:
- $ephemeral_directory/library/:$project_directory/library/
+ $ephemeral_directory/modules/:$project_directory/library/:$HOME/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
ANSIBLE_FILTER_PLUGINS:
- $ephemeral_directory/plugins/filters/:$project_directory/filter/plugins/
+ $ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:$HOME/.ansible/plugins/filter:/usr/share/ansible/plugins/modules
Environment variables can be passed to the provisioner. Variables in this
section which match the names above will be appened to the above defaults, | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import copy
import collections
import os
import shutil
from molecule import logger
from molecule import util
from molecule.provisioner import base
from molecule.provisioner import ansible_playbook
from molecule.provisioner import ansible_playbooks
LOG = logger.get_logger(__name__)
class Ansible(base.Base):
"""
`Ansible`_ is the default provisioner. No other provisioner will be
supported.
    Molecule's provisioner manages the instances' lifecycle. However, the user
must provide the create, destroy, and converge playbooks. Molecule's
``init`` subcommand will provide the necessary files for convenience.
Molecule will skip tasks which are tagged with either `molecule-notest` or
`notest`. With the tag `molecule-idempotence-notest` tasks are only
skipped during the idempotence action step.
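    A task-level opt-out looks like this (illustrative snippet; it is not part
    of the bundled playbooks):

    .. code-block:: yaml

        - name: This task never runs under Molecule
          debug:
            msg: skipped
          tags:
            - molecule-notest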
.. important::
Reserve the create and destroy playbooks for provisioning. Do not
attempt to gather facts or perform operations on the provisioned nodes
inside these playbooks. Due to the gymnastics necessary to sync state
between Ansible and Molecule, it is best to perform these tasks in the
prepare or converge playbooks.
        It is the developer's responsibility to properly map the module's fact
data into the instance_conf_dict fact in the create playbook. This
allows Molecule to properly configure Ansible inventory.
Additional options can be passed to ``ansible-playbook`` through the options
dict. Any option set in this section will override the defaults.
.. important::
Options do not affect the create and destroy actions.
.. note::
Molecule will remove any options matching '^[v]+$', and pass ``-vvv``
to the underlying ``ansible-playbook`` command when executing
`molecule --debug`.
Molecule will silence log output, unless invoked with the ``--debug`` flag.
However, this results in quite a bit of output. To enable Ansible log
output, add the following to the ``provisioner`` section of ``molecule.yml``.
.. code-block:: yaml
provisioner:
name: ansible
log: True
The create/destroy playbooks for Docker and Vagrant are bundled with
Molecule. These playbooks have a clean API from `molecule.yml`, and
    are the most commonly used. The bundled playbooks can still be overridden.
The playbook loading order is:
1. provisioner.playbooks.$driver_name.$action
2. provisioner.playbooks.$action
3. bundled_playbook.$driver_name.$action
.. code-block:: yaml
provisioner:
name: ansible
options:
vvv: True
playbooks:
create: create.yml
converge: playbook.yml
destroy: destroy.yml
Share playbooks between roles.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
create: ../default/create.yml
destroy: ../default/destroy.yml
converge: playbook.yml
Multiple driver playbooks. In some situations a developer may choose to
test the same role against different backends. Molecule will choose driver
specific create/destroy playbooks, if the determined driver has a key in
the playbooks section of the provisioner's dict.
.. important::
If the determined driver has a key in the playbooks dict, Molecule will
use this dict to resolve all provisioning playbooks (create/destroy).
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
docker:
create: create.yml
destroy: destroy.yml
create: create.yml
destroy: destroy.yml
converge: playbook.yml
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
    The side effect playbook executes actions which produce side effects on the
    instance(s). Intended to test HA failover scenarios or the like. It is
not enabled by default. Add the following to the provisioner's ``playbooks``
section to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
side_effect: side_effect.yml
.. important::
This feature should be considered experimental.
The prepare playbook executes actions which bring the system to a given
state prior to converge. It is executed after create, and only once for
the duration of the instances life.
This can be used to bring instances into a particular state, prior to
testing.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
prepare: prepare.yml
The cleanup playbook is for cleaning up test infrastructure that may not
be present on the instance that will be destroyed. The primary use-case
is for "cleaning up" changes that were made outside of Molecule's test
environment. For example, remote database connections or user accounts.
Intended to be used in conjunction with `prepare` to modify external
resources when required.
The cleanup step is executed directly before every destroy step. Just like
the destroy step, it will be run twice. An initial clean before converge
and then a clean before the last destroy step. This means that the cleanup
    playbook must gracefully handle resources which have not
    been created yet.
Add the following to the provisioner's `playbooks` section
to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
cleanup: cleanup.yml
.. important::
This feature should be considered experimental.
Environment variables. Molecule does its best to handle common Ansible
paths. The defaults are as follows.
::
ANSIBLE_ROLES_PATH:
$ephemeral_directory/roles/:$project_directory/../
ANSIBLE_LIBRARY:
$ephemeral_directory/library/:$project_directory/library/
ANSIBLE_FILTER_PLUGINS:
$ephemeral_directory/plugins/filters/:$project_directory/filter/plugins/
Environment variables can be passed to the provisioner. Variables in this
    section which match the names above will be appended to the above defaults,
and converted to absolute paths, where the relative parent is the
$scenario_directory.
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
.. code-block:: yaml
provisioner:
name: ansible
env:
FOO: bar
Modifying ansible.cfg.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
fact_caching: jsonfile
ssh_connection:
scp_if_ssh: True
.. important::
The following keys are disallowed to prevent Molecule from
improperly functioning. They can be specified through the
provisioner's env setting described above, with the exception
        of `privilege_escalation`.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
roles_path: /path/to/roles_path
library: /path/to/library
filter_plugins: /path/to/filter_plugins
privilege_escalation: {}
Roles which require host/groups to have certain variables set. Molecule
uses the same `variables defined in a playbook`_ syntax as `Ansible`_.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
group_vars:
foo1:
foo: bar
foo2:
foo: bar
baz:
qux: zzyzx
host_vars:
foo1-01:
foo: bar
Molecule automatically generates the inventory based on the hosts defined
under `Platforms`_. Using the ``hosts`` key allows to add extra hosts to
the inventory that are not managed by Molecule.
A typical use case is if you want to access some variables from another
host in the inventory (using hostvars) without creating it.
.. note::
The content of ``hosts`` should follow the YAML based inventory syntax:
start with the ``all`` group and have hosts/vars/children entries.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
hosts:
all:
extra_host:
foo: hello
.. important::
The extra hosts added to the inventory using this key won't be
        created/destroyed by Molecule. It is the developer's responsibility
        to target the proper hosts in the playbook. Only the hosts defined
        under `Platforms`_ should be targeted instead of ``all``.
An alternative to the above is symlinking. Molecule creates symlinks to
the specified directory in the inventory directory. This allows ansible to
converge utilizing its built in host/group_vars resolution. These two
forms of inventory management are mutually exclusive.
Like above, it is possible to pass an additional inventory file
(or even dynamic inventory script), using the ``hosts`` key. `Ansible`_ will
automatically merge this inventory with the one generated by molecule.
This can be useful if you want to define extra hosts that are not managed
by Molecule.
.. important::
        Again, it is the developer's responsibility to target the proper hosts
        in the playbook. Only the hosts defined under
        `Platforms`_ should be targeted instead of ``all``.
.. note::
The source directory linking is relative to the scenario's
directory.
The only valid keys are ``hosts``, ``group_vars`` and ``host_vars``. Molecule's
schema validator will enforce this.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
links:
hosts: ../../../inventory/hosts
group_vars: ../../../inventory/group_vars/
host_vars: ../../../inventory/host_vars/
Override connection options:
.. code-block:: yaml
provisioner:
name: ansible
connection_options:
ansible_ssh_user: foo
ansible_ssh_common_args: -o IdentitiesOnly=no
.. _`variables defined in a playbook`: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#defining-variables-in-a-playbook
Add arguments to ansible-playbook when running converge:
.. code-block:: yaml
provisioner:
name: ansible
ansible_args:
- --inventory=mygroups.yml
- --limit=host1,host2
""" # noqa
def __init__(self, config):
"""
Initialize a new ansible class and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Ansible, self).__init__(config)
@property
def default_config_options(self):
"""
Default options provided to construct ansible.cfg and returns a dict.
:return: dict
"""
return {
'defaults': {
'ansible_managed': 'Ansible managed: Do NOT edit this file manually!',
'retry_files_enabled': False,
'host_key_checking': False,
'nocows': 1,
},
'ssh_connection': {
'scp_if_ssh': True,
'control_path': '%(directory)s/%%h-%%p-%%r',
},
}
@property
def default_options(self):
d = {'skip-tags': 'molecule-notest,notest'}
if self._config.action == 'idempotence':
d['skip-tags'] += ',molecule-idempotence-notest'
if self._config.debug:
d['vvv'] = True
d['diff'] = True
return d
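    # Illustrative note (not in the original module): during the idempotence
    # action with --debug enabled, the property above resolves to
    #   {'skip-tags': 'molecule-notest,notest,molecule-idempotence-notest',
    #    'vvv': True, 'diff': True}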
@property
def default_env(self):
env = util.merge_dicts(os.environ.copy(), self._config.env)
env = util.merge_dicts(
env,
{
'ANSIBLE_CONFIG': self._config.provisioner.config_file,
'ANSIBLE_ROLES_PATH': ':'.join(
[
util.abs_path(
os.path.join(
self._config.scenario.ephemeral_directory, 'roles'
)
),
util.abs_path(
os.path.join(self._config.project_directory, os.path.pardir)
),
]
),
'ANSIBLE_LIBRARY': ':'.join(
[
self._get_libraries_directory(),
util.abs_path(
os.path.join(
self._config.scenario.ephemeral_directory, 'library'
)
),
util.abs_path(
os.path.join(self._config.project_directory, 'library')
),
]
),
'ANSIBLE_FILTER_PLUGINS': ':'.join(
[
self._get_filter_plugin_directory(),
util.abs_path(
os.path.join(
self._config.scenario.ephemeral_directory,
'plugins',
'filters',
)
),
util.abs_path(
os.path.join(
self._config.project_directory, 'plugins', 'filters'
)
),
]
),
},
)
env = util.merge_dicts(env, self._config.env)
return env
@property
def name(self):
return self._config.config['provisioner']['name']
@property
def ansible_args(self):
return self._config.config['provisioner']['ansible_args']
@property
def config_options(self):
return util.merge_dicts(
self.default_config_options,
self._config.config['provisioner']['config_options'],
)
@property
def options(self):
if self._config.action in ['create', 'destroy']:
return self.default_options
o = self._config.config['provisioner']['options']
# NOTE(retr0h): Remove verbose options added by the user while in
# debug.
if self._config.debug:
o = util.filter_verbose_permutation(o)
return util.merge_dicts(self.default_options, o)
@property
def env(self):
default_env = self.default_env
env = self._config.config['provisioner']['env'].copy()
# ensure that all keys and values are strings
env = {str(k): str(v) for k, v in env.items()}
roles_path = default_env['ANSIBLE_ROLES_PATH']
library_path = default_env['ANSIBLE_LIBRARY']
filter_plugins_path = default_env['ANSIBLE_FILTER_PLUGINS']
try:
path = self._absolute_path_for(env, 'ANSIBLE_ROLES_PATH')
roles_path = '{}:{}'.format(roles_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, 'ANSIBLE_LIBRARY')
library_path = '{}:{}'.format(library_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, 'ANSIBLE_FILTER_PLUGINS')
filter_plugins_path = '{}:{}'.format(filter_plugins_path, path)
except KeyError:
pass
env['ANSIBLE_ROLES_PATH'] = roles_path
env['ANSIBLE_LIBRARY'] = library_path
env['ANSIBLE_FILTER_PLUGINS'] = filter_plugins_path
return util.merge_dicts(default_env, env)
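    # Illustrative note (not in the original module): a molecule.yml entry such
    # as
    #   env:
    #     ANSIBLE_ROLES_PATH: ../shared/roles
    # is made absolute against the scenario directory and appended to the
    # default, yielding e.g.
    #   ANSIBLE_ROLES_PATH=<ephemeral>/roles:<project>/..:/abs/shared/roles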
@property
def hosts(self):
return self._config.config['provisioner']['inventory']['hosts']
@property
def host_vars(self):
return self._config.config['provisioner']['inventory']['host_vars']
@property
def group_vars(self):
return self._config.config['provisioner']['inventory']['group_vars']
@property
def links(self):
return self._config.config['provisioner']['inventory']['links']
@property
def inventory(self):
"""
Create an inventory structure and returns a dict.
.. code-block:: yaml
ungrouped:
vars:
foo: bar
hosts:
instance-1:
instance-2:
children:
$child_group_name:
hosts:
instance-1:
instance-2:
$group_name:
hosts:
instance-1:
ansible_connection: docker
instance-2:
ansible_connection: docker
:return: str
"""
dd = self._vivify()
for platform in self._config.platforms.instances:
for group in platform.get('groups', ['ungrouped']):
instance_name = platform['name']
connection_options = self.connection_options(instance_name)
molecule_vars = {
'molecule_file': "{{ lookup('env', 'MOLECULE_FILE') }}",
'molecule_ephemeral_directory': "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
'molecule_scenario_directory': "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
'molecule_yml': "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
'molecule_instance_config': "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
'molecule_no_log': "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
}
# All group
dd['all']['hosts'][instance_name] = connection_options
dd['all']['vars'] = molecule_vars
# Named group
dd[group]['hosts'][instance_name] = connection_options
dd[group]['vars'] = molecule_vars
# Ungrouped
dd['ungrouped']['vars'] = {}
# Children
for child_group in platform.get('children', []):
dd[group]['children'][child_group]['hosts'][
instance_name
] = connection_options
return self._default_to_regular(dd)
@property
def inventory_directory(self):
return self._config.scenario.inventory_directory
@property
def inventory_file(self):
return os.path.join(self.inventory_directory, 'ansible_inventory.yml')
@property
def config_file(self):
return os.path.join(self._config.scenario.ephemeral_directory, 'ansible.cfg')
@property
@util.memoize
def playbooks(self):
return ansible_playbooks.AnsiblePlaybooks(self._config)
@property
def directory(self):
return os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
'molecule',
'provisioner',
'ansible',
)
def cleanup(self):
"""
Executes `ansible-playbook` against the cleanup playbook and returns
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.cleanup)
pb.execute()
def connection_options(self, instance_name):
d = self._config.driver.ansible_connection_options(instance_name)
return util.merge_dicts(
d, self._config.config['provisioner']['connection_options']
)
def check(self):
"""
Executes ``ansible-playbook`` against the converge playbook with the
``--check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg('check', True)
pb.execute()
def converge(self, playbook=None, **kwargs):
"""
Executes ``ansible-playbook`` against the converge playbook unless
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
        :param kwargs: Optional keyword arguments.
:return: str
"""
if playbook is None:
pb = self._get_ansible_playbook(self.playbooks.converge, **kwargs)
else:
pb = self._get_ansible_playbook(playbook, **kwargs)
return pb.execute()
def destroy(self):
"""
Executes ``ansible-playbook`` against the destroy playbook and returns
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.destroy)
pb.execute()
def side_effect(self):
"""
Executes ``ansible-playbook`` against the side_effect playbook and
returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.side_effect)
pb.execute()
def create(self):
"""
Executes ``ansible-playbook`` against the create playbook and returns
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.create)
pb.execute()
def prepare(self):
"""
Executes ``ansible-playbook`` against the prepare playbook and returns
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.prepare)
pb.execute()
def syntax(self):
"""
Executes ``ansible-playbook`` against the converge playbook with the
        ``--syntax-check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg('syntax-check', True)
pb.execute()
def verify(self):
"""
Executes ``ansible-playbook`` against the verify playbook and returns
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.verify)
pb.execute()
def write_config(self):
"""
Writes the provisioner's config file to disk and returns None.
:return: None
"""
template = util.render_template(
self._get_config_template(), config_options=self.config_options
)
util.write_file(self.config_file, template)
def manage_inventory(self):
"""
Manages inventory for Ansible and returns None.
:returns: None
"""
self._write_inventory()
self._remove_vars()
if not self.links:
self._add_or_update_vars()
else:
self._link_or_update_vars()
def abs_path(self, path):
return util.abs_path(os.path.join(self._config.scenario.directory, path))
def _add_or_update_vars(self):
"""
Creates host and/or group vars and returns None.
:returns: None
"""
# Create the hosts extra inventory source (only if not empty)
hosts_file = os.path.join(self.inventory_directory, 'hosts')
if self.hosts:
util.write_file(hosts_file, util.safe_dump(self.hosts))
# Create the host_vars and group_vars directories
for target in ['host_vars', 'group_vars']:
if target == 'host_vars':
vars_target = copy.deepcopy(self.host_vars)
for instance_name, _ in self.host_vars.items():
if instance_name == 'localhost':
instance_key = instance_name
else:
instance_key = instance_name
vars_target[instance_key] = vars_target.pop(instance_name)
elif target == 'group_vars':
vars_target = self.group_vars
if vars_target:
target_vars_directory = os.path.join(self.inventory_directory, target)
if not os.path.isdir(util.abs_path(target_vars_directory)):
os.mkdir(util.abs_path(target_vars_directory))
for target in vars_target.keys():
target_var_content = vars_target[target]
path = os.path.join(util.abs_path(target_vars_directory), target)
util.write_file(path, util.safe_dump(target_var_content))
def _write_inventory(self):
"""
Writes the provisioner's inventory file to disk and returns None.
:return: None
"""
self._verify_inventory()
util.write_file(self.inventory_file, util.safe_dump(self.inventory))
def _remove_vars(self):
"""
Remove hosts/host_vars/group_vars and returns None.
:returns: None
"""
for name in ("hosts", "group_vars", "host_vars"):
d = os.path.join(self.inventory_directory, name)
if os.path.islink(d) or os.path.isfile(d):
os.unlink(d)
elif os.path.isdir(d):
shutil.rmtree(d)
def _link_or_update_vars(self):
"""
Creates or updates the symlink to group_vars and returns None.
:returns: None
"""
for d, source in self.links.items():
target = os.path.join(self.inventory_directory, d)
source = os.path.join(self._config.scenario.directory, source)
if not os.path.exists(source):
msg = "The source path '{}' does not exist.".format(source)
util.sysexit_with_message(msg)
msg = "Inventory {} linked to {}".format(source, target)
LOG.info(msg)
os.symlink(source, target)
def _get_ansible_playbook(self, playbook, **kwargs):
"""
Get an instance of AnsiblePlaybook and returns it.
:param playbook: A string containing an absolute path to a
provisioner's playbook.
        :param kwargs: Optional keyword arguments.
:return: object
"""
return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)
def _verify_inventory(self):
"""
Verify the inventory is valid and returns None.
:return: None
"""
if not self.inventory:
msg = "Instances missing from the 'platform' " "section of molecule.yml."
util.sysexit_with_message(msg)
def _get_config_template(self):
"""
Returns a config template string.
:return: str
"""
return """
{% for section, section_dict in config_options.items() -%}
[{{ section }}]
{% for k, v in section_dict.items() -%}
{{ k }} = {{ v }}
{% endfor -%}
{% endfor -%}
""".strip()
def _vivify(self):
"""
Returns an autovivification default dict.
:return: dict
"""
return collections.defaultdict(self._vivify)
def _default_to_regular(self, d):
if isinstance(d, collections.defaultdict):
d = {k: self._default_to_regular(v) for k, v in d.items()}
return d
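    # Illustrative note (not in the original module): this pair lets `inventory`
    # assign deeply nested keys without pre-creating the parents, then converts
    # the result back to plain dicts for YAML dumping:
    #   dd = self._vivify()
    #   dd['web']['children']['app']['hosts']['instance-1'] = {}
    #   self._default_to_regular(dd)  # -> nested ordinary dicts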
def _get_plugin_directory(self):
return os.path.join(self.directory, 'plugins')
def _get_libraries_directory(self):
return util.abs_path(os.path.join(self._get_plugin_directory(), 'libraries'))
def _get_filter_plugin_directory(self):
return util.abs_path(os.path.join(self._get_plugin_directory(), 'filters'))
def _absolute_path_for(self, env, key):
return ':'.join([self.abs_path(p) for p in env[key].split(':')])
| 1 | 10,032 | The library -> modules and filters -> filter path changes are breaking changes or? | ansible-community-molecule | py |
@@ -1,6 +1,9 @@
import logging
+import os
from typing import TYPE_CHECKING
+import boto3
+import botocore.config
import pytest
from localstack.utils.aws import aws_stack | 1 | import logging
from typing import TYPE_CHECKING
import pytest
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_stack import create_dynamodb_table
from localstack.utils.common import short_uid
if TYPE_CHECKING:
from mypy_boto3_dynamodb import DynamoDBClient
from mypy_boto3_s3 import S3Client
from mypy_boto3_sns import SNSClient
from mypy_boto3_sqs import SQSClient
LOG = logging.getLogger(__name__)
def _client(service):
return aws_stack.connect_to_service(service)
@pytest.fixture(scope="class")
def dynamodb_client() -> "DynamoDBClient":
return _client("dynamodb")
@pytest.fixture(scope="class")
def s3_client() -> "S3Client":
return _client("s3")
@pytest.fixture(scope="class")
def sqs_client() -> "SQSClient":
return _client("sqs")
@pytest.fixture(scope="class")
def sns_client() -> "SNSClient":
return _client("sns")
@pytest.fixture
def dynamodb_create_table(dynamodb_client):
tables = list()
def factory(**kwargs):
kwargs["client"] = dynamodb_client
if "table_name" not in kwargs:
kwargs["table_name"] = "test-table-%s" % short_uid()
if "partition_key" not in kwargs:
kwargs["partition_key"] = "id"
kwargs["sleep_after"] = 0
tables.append(kwargs["table_name"])
return create_dynamodb_table(**kwargs)
yield factory
# cleanup
for table in tables:
try:
dynamodb_client.delete_table(TableName=table)
except Exception as e:
LOG.debug("error cleaning up table %s: %s", table, e)
@pytest.fixture
def s3_create_bucket(s3_client):
buckets = list()
def factory(**kwargs) -> str:
if "Bucket" not in kwargs:
kwargs["Bucket"] = "test-bucket-%s" % short_uid()
s3_client.create_bucket(**kwargs)
buckets.append(kwargs["Bucket"])
return kwargs["Bucket"]
yield factory
# cleanup
for bucket in buckets:
try:
s3_client.delete_bucket(Bucket=bucket)
except Exception as e:
LOG.debug("error cleaning up bucket %s: %s", bucket, e)
@pytest.fixture
def s3_bucket(s3_create_bucket) -> str:
return s3_create_bucket()
@pytest.fixture
def sqs_create_queue(sqs_client):
queue_urls = list()
def factory(**kwargs):
if "QueueName" not in kwargs:
kwargs["QueueName"] = "test-queue-%s" % short_uid()
response = sqs_client.create_queue(QueueName=kwargs["QueueName"])
url = response["QueueUrl"]
queue_urls.append(url)
return sqs_client.get_queue_attributes(QueueUrl=url)
yield factory
# cleanup
for queue_url in queue_urls:
try:
sqs_client.delete_queue(QueueUrl=queue_url)
except Exception as e:
LOG.debug("error cleaning up queue %s: %s", queue_url, e)
@pytest.fixture
def sqs_queue(sqs_create_queue):
return sqs_create_queue()
@pytest.fixture
def sns_topic(sns_client):
# TODO: add fixture factories
topic_name = "test-topic-%s" % short_uid()
response = sns_client.create_topic(Name=topic_name)
topic_arn = response["TopicArn"]
yield sns_client.get_topic_attributes(TopicArn=topic_arn)
sns_client.delete_topic(TopicArn=topic_arn)
| 1 | 12,884 | Can we encapsulate this check (`os.environ.get("TEST_TARGET") == "AWS_CLOUD"`) into a small config/util function? (and also use it in `integration/conftest.py`) (I'd generally try to avoid accessing `os.environ` directly across the codebase. I'm aware that we're already doing this in a few places, but maybe we can start pulling things out - this will also help us create more systematic config/environment abstractions as we go...) | localstack-localstack | py |
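A minimal sketch of the config/util helper the reviewer asks for, assuming the `TEST_TARGET`/`AWS_CLOUD` convention quoted in the comment; the name `is_aws_cloud` and its module location are assumptions:

```python
import os

def is_aws_cloud() -> bool:
    # Hypothetical helper: True when the suite targets real AWS instead of
    # LocalStack, per the TEST_TARGET convention cited in the review.
    return os.environ.get("TEST_TARGET") == "AWS_CLOUD"
```

Fixtures and `conftest.py` modules could then call `is_aws_cloud()` instead of reading `os.environ` directly, keeping environment access behind one seam.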
@@ -22,7 +22,9 @@ export function h(nodeName, attributes) {
}
while (stack.length) {
if ((child = stack.pop()) && child.pop!==undefined) {
- for (i=child.length; i--; ) stack.push(child[i]);
+ [].concat(child).reverse().forEach((item) => {
+ stack.push(item);
+ });
}
else {
if (child===true || child===false) child = null; | 1 | import { VNode } from './vnode';
import options from './options';
const stack = [];
const EMPTY_CHILDREN = [];
/** JSX/hyperscript reviver
* Benchmarks: https://esbench.com/bench/57ee8f8e330ab09900a1a1a0
* @see http://jasonformat.com/wtf-is-jsx
* @public
*/
export function h(nodeName, attributes) {
let children=EMPTY_CHILDREN, lastSimple, child, simple, i;
for (i=arguments.length; i-- > 2; ) {
stack.push(arguments[i]);
}
if (attributes && attributes.children!=null) {
if (!stack.length) stack.push(attributes.children);
delete attributes.children;
}
while (stack.length) {
if ((child = stack.pop()) && child.pop!==undefined) {
for (i=child.length; i--; ) stack.push(child[i]);
}
else {
if (child===true || child===false) child = null;
if ((simple = typeof nodeName!=='function')) {
if (child==null) child = '';
else if (typeof child==='number') child = String(child);
else if (typeof child!=='string') simple = false;
}
if (simple && lastSimple) {
children[children.length-1] += child;
}
else if (children===EMPTY_CHILDREN) {
children = [child];
}
else {
children.push(child);
}
lastSimple = simple;
}
}
let p = new VNode();
p.nodeName = nodeName;
p.children = children;
p.attributes = attributes==null ? undefined : attributes;
p.key = attributes==null ? undefined : attributes.key;
// if a "vnode hook" is defined, pass every created VNode to it
if (options.vnode!==undefined) options.vnode(p);
return p;
}
| 1 | 10,988 | An `if` statement would be better here. Functions, concat and reverse are all very expensive. | preactjs-preact | js |
@@ -156,3 +156,12 @@ func (c *factory) ERC20Address(ctx context.Context) (common.Address, error) {
}
return erc20Address, nil
}
+
+// DiscoverFactoryAddress returns the canonical factory for this chainID
+func DiscoverFactoryAddress(chainID int64) (common.Address, bool) {
+ if chainID == 5 {
+ // goerli
+ return common.HexToAddress("0x334394E8c891E77b1449084aaDD659920BB25247"), true
+ }
+ return common.Address{}, false
+} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chequebook
import (
"bytes"
"errors"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethersphere/sw3-bindings/v2/simpleswapfactory"
"golang.org/x/net/context"
)
var (
ErrInvalidFactory = errors.New("not a valid factory contract")
ErrNotDeployedByFactory = errors.New("chequebook not deployed by factory")
)
// Factory is the main interface for interacting with the chequebook factory.
type Factory interface {
// ERC20Address returns the token for which this factory deploys chequebooks.
ERC20Address(ctx context.Context) (common.Address, error)
// Deploy deploys a new chequebook and returns once confirmed.
Deploy(ctx context.Context, issuer common.Address, defaultHardDepositTimeoutDuration *big.Int) (common.Address, error)
// VerifyBytecode checks that the factory is valid.
VerifyBytecode(ctx context.Context) error
// VerifyChequebook checks that the supplied chequebook has been deployed by this factory.
VerifyChequebook(ctx context.Context, chequebook common.Address) error
}
type factory struct {
backend Backend
transactionService TransactionService
address common.Address
ABI abi.ABI
instance SimpleSwapFactoryBinding
}
// NewFactory creates a new factory service for the provided factory contract.
func NewFactory(backend Backend, transactionService TransactionService, address common.Address, simpleSwapFactoryBindingFunc SimpleSwapFactoryBindingFunc) (Factory, error) {
ABI, err := abi.JSON(strings.NewReader(simpleswapfactory.SimpleSwapFactoryABI))
if err != nil {
return nil, err
}
instance, err := simpleSwapFactoryBindingFunc(address, backend)
if err != nil {
return nil, err
}
return &factory{
backend: backend,
transactionService: transactionService,
address: address,
ABI: ABI,
instance: instance,
}, nil
}
// Deploy deploys a new chequebook and returns once confirmed.
func (c *factory) Deploy(ctx context.Context, issuer common.Address, defaultHardDepositTimeoutDuration *big.Int) (common.Address, error) {
callData, err := c.ABI.Pack("deploySimpleSwap", issuer, big.NewInt(0).Set(defaultHardDepositTimeoutDuration))
if err != nil {
return common.Address{}, err
}
request := &TxRequest{
To: c.address,
Data: callData,
GasPrice: nil,
GasLimit: 0,
Value: big.NewInt(0),
}
txHash, err := c.transactionService.Send(ctx, request)
if err != nil {
return common.Address{}, err
}
receipt, err := c.transactionService.WaitForReceipt(ctx, txHash)
if err != nil {
return common.Address{}, err
}
chequebookAddress, err := c.parseDeployReceipt(receipt)
if err != nil {
return common.Address{}, err
}
return chequebookAddress, nil
}
// parseDeployReceipt parses the address of the deployed chequebook from the receipt.
func (c *factory) parseDeployReceipt(receipt *types.Receipt) (address common.Address, err error) {
if receipt.Status != 1 {
return common.Address{}, ErrTransactionReverted
}
for _, log := range receipt.Logs {
if log.Address != c.address {
continue
}
if event, err := c.instance.ParseSimpleSwapDeployed(*log); err == nil {
address = event.ContractAddress
break
}
}
if (address == common.Address{}) {
return common.Address{}, errors.New("contract deployment failed")
}
return address, nil
}
// VerifyBytecode checks that the factory is valid.
func (c *factory) VerifyBytecode(ctx context.Context) (err error) {
code, err := c.backend.CodeAt(ctx, c.address, nil)
if err != nil {
return err
}
referenceCode := common.FromHex(simpleswapfactory.SimpleSwapFactoryDeployedCode)
if !bytes.Equal(code, referenceCode) {
return ErrInvalidFactory
}
return nil
}
// VerifyChequebook checks that the supplied chequebook has been deployed by this factory.
func (c *factory) VerifyChequebook(ctx context.Context, chequebook common.Address) error {
deployed, err := c.instance.DeployedContracts(&bind.CallOpts{
Context: ctx,
}, chequebook)
if err != nil {
return err
}
if !deployed {
return ErrNotDeployedByFactory
}
return nil
}
// ERC20Address returns the token for which this factory deploys chequebooks.
func (c *factory) ERC20Address(ctx context.Context) (common.Address, error) {
erc20Address, err := c.instance.ERC20Address(&bind.CallOpts{
Context: ctx,
})
if err != nil {
return common.Address{}, err
}
return erc20Address, nil
}
| 1 | 12,588 | `Log here: connect to default factory for goerli network <address>` | ethersphere-bee | go |
@@ -125,9 +125,11 @@ func (e *Executor) reportRequiringApproval(ctx context.Context) {
var approvers []string
- for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
- if v.Event == "DEPLOYMENT_WAIT_APPROVAL" {
- approvers = v.Slack
+ if ds.GenericDeploymentConfig.DeploymentNotification != nil {
+ for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
+ if e := "EVENT_" + v.Event; e == model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL.String() {
+ approvers = v.Slack
+ }
}
}
| 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package waitapproval
import (
"context"
"time"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
approvedByKey = "ApprovedBy"
)
type Executor struct {
executor.Input
}
type registerer interface {
Register(stage model.Stage, f executor.Factory) error
}
// Register registers this executor factory into a given registerer.
func Register(r registerer) {
f := func(in executor.Input) executor.Executor {
return &Executor{
Input: in,
}
}
r.Register(model.StageWaitApproval, f)
}
// Execute starts waiting until an approval from one of the specified users.
func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus {
var (
originalStatus = e.Stage.Status
ctx = sig.Context()
ticker = time.NewTicker(5 * time.Second)
)
defer ticker.Stop()
timeout := e.StageConfig.WaitApprovalStageOptions.Timeout.Duration()
timer := time.NewTimer(timeout)
e.reportRequiringApproval(ctx)
e.LogPersister.Info("Waiting for an approval...")
for {
select {
case <-ticker.C:
if commander, ok := e.checkApproval(ctx); ok {
e.LogPersister.Infof("Got an approval from %s", commander)
return model.StageStatus_STAGE_SUCCESS
}
case s := <-sig.Ch():
switch s {
case executor.StopSignalCancel:
return model.StageStatus_STAGE_CANCELLED
case executor.StopSignalTerminate:
return originalStatus
default:
return model.StageStatus_STAGE_FAILURE
}
case <-timer.C:
e.LogPersister.Errorf("Timed out %v", timeout)
return model.StageStatus_STAGE_FAILURE
}
}
}
func (e *Executor) checkApproval(ctx context.Context) (string, bool) {
var approveCmd *model.ReportableCommand
commands := e.CommandLister.ListCommands()
for i, cmd := range commands {
if cmd.GetApproveStage() != nil {
approveCmd = &commands[i]
break
}
}
if approveCmd == nil {
return "", false
}
metadata := map[string]string{
approvedByKey: approveCmd.Commander,
}
if ori, ok := e.MetadataStore.GetStageMetadata(e.Stage.Id); ok {
for k, v := range ori {
metadata[k] = v
}
}
if err := e.MetadataStore.SetStageMetadata(ctx, e.Stage.Id, metadata); err != nil {
e.LogPersister.Errorf("Unabled to save approver information to deployment, %v", err)
return "", false
}
if err := approveCmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, nil); err != nil {
e.Logger.Error("failed to report handled command", zap.Error(err))
}
return approveCmd.Commander, true
}
func (e *Executor) reportRequiringApproval(ctx context.Context) {
ds, err := e.TargetDSP.GetReadOnly(ctx, e.LogPersister)
if err != nil {
e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err)
return
}
var approvers []string
for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
if v.Event == "DEPLOYMENT_WAIT_APPROVAL" {
approvers = v.Slack
}
}
e.Notifier.Notify(model.NotificationEvent{
Type: model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL,
Metadata: &model.NotificationEventDeploymentWaitApproval{
Deployment: e.Deployment,
EnvName: e.EnvName,
MentionedAccounts: approvers,
},
})
}
| 1 | 20,543 | Nit: should add a `break` to avoid redundant iterations. | pipe-cd-pipe | go
@@ -56,6 +56,15 @@ namespace Nethermind.Store.Test
provider.Commit(SpuriousDragon.Instance);
Assert.False(provider.AccountExists(_address1));
}
+
+ [Test]
+ public void Eip_158_account_dont_exists_after_zero_value_transfer()
+ {
+ ISnapshotableDb stateDb = new StateDb(new MemDb());
+ StateProvider provider = new StateProvider(stateDb, Substitute.For<IDb>(), Logger);
+ provider.AddToBalance(_address1, 0, SpuriousDragon.Instance);
+ provider.AccountExists(_address1).Should().BeFalse();
+ }
[Test]
public void Eip_158_touch_zero_value_system_account_is_not_deleted() | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using FluentAssertions;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Core.Specs.Forks;
using Nethermind.Core.Test.Builders;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Evm.Tracing;
using Nethermind.Logging;
using NSubstitute;
using NUnit.Framework;
namespace Nethermind.Store.Test
{
[TestFixture]
public class StateProviderTests
{
private static readonly Keccak Hash1 = Keccak.Compute("1");
private static readonly Keccak Hash2 = Keccak.Compute("2");
private readonly Address _address1 = new Address(Hash1);
private static readonly ILogManager Logger = LimboLogs.Instance;
[Test]
public void Eip_158_zero_value_transfer_deletes()
{
ISnapshotableDb stateDb = new StateDb(new MemDb());
StateProvider frontierProvider = new StateProvider(stateDb, Substitute.For<IDb>(), Logger);
frontierProvider.CreateAccount(_address1, 0);
frontierProvider.Commit(Frontier.Instance);
frontierProvider.CommitTree();
StateProvider provider = new StateProvider(stateDb, Substitute.For<IDb>(), Logger);
provider.StateRoot = frontierProvider.StateRoot;
provider.AddToBalance(_address1, 0, SpuriousDragon.Instance);
provider.Commit(SpuriousDragon.Instance);
Assert.False(provider.AccountExists(_address1));
}
[Test]
public void Eip_158_touch_zero_value_system_account_is_not_deleted()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
var systemUser = Address.SystemUser;
provider.CreateAccount(systemUser, 0);
provider.Commit(Homestead.Instance);
var releaseSpec = new ReleaseSpec() {IsEip158Enabled = true};
provider.UpdateCodeHash(systemUser, Keccak.OfAnEmptyString, releaseSpec);
provider.Commit(releaseSpec);
provider.GetAccount(systemUser).Should().NotBeNull();
}
[Test]
public void Empty_commit_restore()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.Commit(Frontier.Instance);
provider.Restore(-1);
}
[Test]
        public void Update_balance_on_non_existing_account_throws()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
Assert.Throws<InvalidOperationException>(() => provider.AddToBalance(TestItem.AddressA, 1.Ether(), Olympic.Instance));
}
[Test]
public void Is_empty_account()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.CreateAccount(_address1, 0);
provider.Commit(Frontier.Instance);
Assert.True(provider.IsEmptyAccount(_address1));
}
[Test]
public void Restore_update_restore()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.CreateAccount(_address1, 0);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.Restore(4);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.Restore(4);
Assert.AreEqual((UInt256)4, provider.GetBalance(_address1));
}
[Test]
public void Keep_in_cache()
{
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.CreateAccount(_address1, 0);
provider.Commit(Frontier.Instance);
provider.GetBalance(_address1);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.Restore(-1);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.Restore(-1);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.Restore(-1);
Assert.AreEqual(UInt256.Zero, provider.GetBalance(_address1));
}
[Test]
public void Restore_in_the_middle()
{
byte[] code = new byte[] {1};
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.CreateAccount(_address1, 1);
provider.AddToBalance(_address1, 1, Frontier.Instance);
provider.IncrementNonce(_address1);
Keccak codeHash = provider.UpdateCode(new byte[] { 1 });
provider.UpdateCodeHash(_address1, codeHash, Frontier.Instance);
provider.UpdateStorageRoot(_address1, Hash2);
Assert.AreEqual(UInt256.One, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One + 1, provider.GetBalance(_address1));
Assert.AreEqual(code, provider.GetCode(_address1));
provider.Restore(4);
Assert.AreEqual(UInt256.One, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One + 1, provider.GetBalance(_address1));
Assert.AreEqual(code, provider.GetCode(_address1));
provider.Restore(3);
Assert.AreEqual(UInt256.One, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One + 1, provider.GetBalance(_address1));
Assert.AreEqual(code, provider.GetCode(_address1));
provider.Restore(2);
Assert.AreEqual(UInt256.One, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One + 1, provider.GetBalance(_address1));
Assert.AreEqual(new byte[0], provider.GetCode(_address1));
provider.Restore(1);
Assert.AreEqual(UInt256.Zero, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One + 1, provider.GetBalance(_address1));
Assert.AreEqual(new byte[0], provider.GetCode(_address1));
provider.Restore(0);
Assert.AreEqual(UInt256.Zero, provider.GetNonce(_address1));
Assert.AreEqual(UInt256.One, provider.GetBalance(_address1));
Assert.AreEqual(new byte[0], provider.GetCode(_address1));
provider.Restore(-1);
Assert.AreEqual(false, provider.AccountExists(_address1));
}
[Test(Description = "It was failing before as touch was marking the accounts as committed but not adding to trace list")]
public void Touch_empty_trace_does_not_throw()
{
ParityLikeTxTracer tracer = new ParityLikeTxTracer(Build.A.Block.TestObject, null, ParityTraceTypes.StateDiff);
StateProvider provider = new StateProvider(new StateDb(new MemDb()), Substitute.For<IDb>(), Logger);
provider.CreateAccount(_address1, 0);
Account account = provider.GetAccount(_address1);
Assert.True(account.IsEmpty);
provider.Commit(Frontier.Instance); // commit empty account (before the empty account fix in Spurious Dragon)
Assert.True(provider.AccountExists(_address1));
provider.Reset(); // clear all caches
            provider.GetBalance(_address1); // just cache
provider.AddToBalance(_address1, 0, SpuriousDragon.Instance); // touch
Assert.DoesNotThrow(() => provider.Commit(SpuriousDragon.Instance, tracer));
}
}
| 1 | 22,695 | I think we want an exception here; there should never be a transfer to a nonexistent account. | NethermindEth-nethermind | .cs
@@ -35,7 +35,7 @@ public class Container {
private final ContainerId id;
public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
- LOG.info("Created container " + id);
+ LOG.finest("Created container " + id);
this.client = Objects.requireNonNull(client);
this.id = Objects.requireNonNull(id);
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.time.Duration;
import java.util.Objects;
import java.util.function.Function;
import java.util.logging.Logger;
public class Container {
public static final Logger LOG = Logger.getLogger(Container.class.getName());
private final Function<HttpRequest, HttpResponse> client;
private final ContainerId id;
public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
LOG.info("Created container " + id);
this.client = Objects.requireNonNull(client);
this.id = Objects.requireNonNull(id);
}
public ContainerId getId() {
return id;
}
public void start() {
LOG.info("Starting " + getId());
client.apply(new HttpRequest(POST, String.format("/containers/%s/start", id)));
}
public void stop(Duration timeout) {
Objects.requireNonNull(timeout);
LOG.info("Stopping " + getId());
String seconds = String.valueOf(timeout.toMillis() / 1000);
HttpRequest request = new HttpRequest(POST, String.format("/containers/%s/stop", id));
request.addQueryParameter("t", seconds);
client.apply(request);
}
public void delete() {
LOG.info("Removing " + getId());
HttpRequest request = new HttpRequest(DELETE, "/containers/" + id);
client.apply(request);
}
}
| 1 | 16,458 | This code is new and not tested well. While we may drop the log level before we ship 4.0, right now this is extremely helpful to users. | SeleniumHQ-selenium | java |
@@ -79,11 +79,11 @@ class TestSklearn(unittest.TestCase):
'../../examples/lambdarank/rank.test.query'))
gbm = lgb.LGBMRanker()
gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)],
- eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=5, verbose=False,
- callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)])
- self.assertLessEqual(gbm.best_iteration_, 12)
- self.assertGreater(gbm.best_score_['valid_0']['ndcg@1'], 0.6173)
- self.assertGreater(gbm.best_score_['valid_0']['ndcg@3'], 0.6479)
+ eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=10, verbose=False,
+ callbacks=[lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))])
+ self.assertLessEqual(gbm.best_iteration_, 25)
+ self.assertGreater(gbm.best_score_['valid_0']['ndcg@1'], 0.60)
+ self.assertGreater(gbm.best_score_['valid_0']['ndcg@3'], 0.60)
def test_regression_with_custom_objective(self):
def objective_ls(y_true, y_pred): | 1 | # coding: utf-8
# pylint: skip-file
import itertools
import math
import os
import unittest
import warnings
import lightgbm as lgb
import numpy as np
from sklearn import __version__ as sk_version
from sklearn.base import clone
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_iris, load_svmlight_file)
from sklearn.exceptions import SkipTestWarning
from sklearn.externals import joblib
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils.estimator_checks import (_yield_all_checks, SkipTest,
check_parameters_default_constructible)
def multi_error(y_true, y_pred):
return np.mean(y_true != y_pred)
def multi_logloss(y_true, y_pred):
return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])
def custom_asymmetric_obj(y_true, y_pred):
residual = (y_true - y_pred).astype("float")
grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual)
hess = np.where(residual < 0, 2 * 10.0, 2.0)
return grad, hess
def mse(y_true, y_pred):
return 'custom MSE', mean_squared_error(y_true, y_pred), False
class TestSklearn(unittest.TestCase):
def test_binary(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = log_loss(y_test, gbm.predict_proba(X_test))
self.assertLess(ret, 0.15)
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['binary_logloss'][gbm.best_iteration_ - 1], places=5)
def test_regression(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 16)
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5)
def test_multiclass(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = multi_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.2)
ret = multi_logloss(y_test, gbm.predict_proba(X_test))
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['multi_logloss'][gbm.best_iteration_ - 1], places=5)
def test_lambdarank(self):
X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.train'))
X_test, y_test = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.test'))
q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.train.query'))
q_test = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.test.query'))
gbm = lgb.LGBMRanker()
gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)],
eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=5, verbose=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)])
self.assertLessEqual(gbm.best_iteration_, 12)
self.assertGreater(gbm.best_score_['valid_0']['ndcg@1'], 0.6173)
self.assertGreater(gbm.best_score_['valid_0']['ndcg@3'], 0.6479)
def test_regression_with_custom_objective(self):
def objective_ls(y_true, y_pred):
grad = (y_pred - y_true)
hess = np.ones(len(y_true))
return grad, hess
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=50, silent=True, objective=objective_ls)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 100)
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5)
def test_binary_classification_with_custom_objective(self):
def logregobj(y_true, y_pred):
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
grad = y_pred - y_true
hess = y_pred * (1.0 - y_pred)
return grad, hess
def binary_error(y_test, y_pred):
return np.mean([int(p > 0.5) != y for y, p in zip(y_test, y_pred)])
X, y = load_digits(2, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMClassifier(n_estimators=50, silent=True, objective=logregobj)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = binary_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.1)
def test_dart(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(boosting_type='dart')
gbm.fit(X_train, y_train)
self.assertLessEqual(gbm.score(X_train, y_train), 1.)
def test_grid_search(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'boosting_type': ['dart', 'gbdt'],
'n_estimators': [5, 8],
'drop_rate': [0.05, 0.1]}
gbm = GridSearchCV(lgb.LGBMRegressor(), params, cv=3)
gbm.fit(X_train, y_train)
self.assertIn(gbm.best_params_['n_estimators'], [5, 8])
def test_clone_and_property(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=100, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False)
gbm_clone = clone(gbm)
self.assertIsInstance(gbm.booster_, lgb.Booster)
self.assertIsInstance(gbm.feature_importances_, np.ndarray)
X, y = load_digits(2, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
clf = lgb.LGBMClassifier()
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False)
self.assertListEqual(sorted(clf.classes_), [0, 1])
self.assertEqual(clf.n_classes_, 2)
self.assertIsInstance(clf.booster_, lgb.Booster)
self.assertIsInstance(clf.feature_importances_, np.ndarray)
def test_joblib(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj,
silent=True, importance_type='split')
gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric=mse, early_stopping_rounds=5, verbose=False,
callbacks=[lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1)))])
joblib.dump(gbm, 'lgb.pkl') # test model with custom functions
gbm_pickle = joblib.load('lgb.pkl')
self.assertIsInstance(gbm_pickle.booster_, lgb.Booster)
self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params())
np.testing.assert_array_equal(gbm.feature_importances_, gbm_pickle.feature_importances_)
self.assertAlmostEqual(gbm_pickle.learning_rate, 0.1)
self.assertTrue(callable(gbm_pickle.objective))
for eval_set in gbm.evals_result_:
for metric in gbm.evals_result_[eval_set]:
np.testing.assert_allclose(gbm.evals_result_[eval_set][metric],
gbm_pickle.evals_result_[eval_set][metric])
pred_origin = gbm.predict(X_test)
pred_pickle = gbm_pickle.predict(X_test)
np.testing.assert_allclose(pred_origin, pred_pickle)
def test_feature_importances_single_leaf(self):
clf = lgb.LGBMClassifier(n_estimators=100)
data = load_iris()
clf.fit(data.data, data.target)
importances = clf.feature_importances_
self.assertEqual(len(importances), 4)
def test_feature_importances_type(self):
clf = lgb.LGBMClassifier(n_estimators=100)
data = load_iris()
clf.fit(data.data, data.target)
clf.set_params(importance_type='split')
importances_split = clf.feature_importances_
clf.set_params(importance_type='gain')
importances_gain = clf.feature_importances_
        # Test that the largest element is NOT the same; the smallest can be the same, i.e. zero
importance_split_top1 = sorted(importances_split, reverse=True)[0]
importance_gain_top1 = sorted(importances_gain, reverse=True)[0]
self.assertNotEqual(importance_split_top1, importance_gain_top1)
# sklearn <0.19 cannot accept instance, but many tests could be passed only with min_data=1 and min_data_in_bin=1
@unittest.skipIf(sk_version < '0.19.0', 'scikit-learn version is less than 0.19')
def test_sklearn_integration(self):
# we cannot use `check_estimator` directly since there is no skip test mechanism
for name, estimator in ((lgb.sklearn.LGBMClassifier.__name__, lgb.sklearn.LGBMClassifier),
(lgb.sklearn.LGBMRegressor.__name__, lgb.sklearn.LGBMRegressor)):
check_parameters_default_constructible(name, estimator)
# we cannot leave default params (see https://github.com/microsoft/LightGBM/issues/833)
estimator = estimator(min_child_samples=1, min_data_in_bin=1)
for check in _yield_all_checks(name, estimator):
check_name = check.func.__name__ if hasattr(check, 'func') else check.__name__
if check_name == 'check_estimators_nan_inf':
continue # skip test because LightGBM deals with nan
try:
check(name, estimator)
except SkipTest as message:
warnings.warn(message, SkipTestWarning)
@unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
def test_pandas_categorical(self):
import pandas as pd
np.random.seed(42) # sometimes there is no difference how cols are treated (cat or not cat)
X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
"B": np.random.permutation([1, 2, 3] * 100), # int
"C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float
"D": np.random.permutation([True, False] * 150), # bool
"E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
ordered=True)}) # str and ordered categorical
y = np.random.permutation([0, 1] * 150)
X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20), # unseen category
"B": np.random.permutation([1, 3] * 30),
"C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": np.random.permutation([True, False] * 30),
"E": pd.Categorical(pd.np.random.permutation(['z', 'y'] * 30),
ordered=True)})
np.random.seed() # reset seed
cat_cols_actual = ["A", "B", "C", "D"]
cat_cols_to_store = cat_cols_actual + ["E"]
X[cat_cols_actual] = X[cat_cols_actual].astype('category')
X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]
gbm0 = lgb.sklearn.LGBMClassifier().fit(X, y)
pred0 = gbm0.predict(X_test, raw_score=True)
pred_prob = gbm0.predict_proba(X_test)[:, 1]
gbm1 = lgb.sklearn.LGBMClassifier().fit(X, pd.Series(y), categorical_feature=[0])
pred1 = gbm1.predict(X_test, raw_score=True)
gbm2 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=['A'])
pred2 = gbm2.predict(X_test, raw_score=True)
gbm3 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=['A', 'B', 'C', 'D'])
pred3 = gbm3.predict(X_test, raw_score=True)
gbm3.booster_.save_model('categorical.model')
gbm4 = lgb.Booster(model_file='categorical.model')
pred4 = gbm4.predict(X_test)
gbm5 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=['E'])
pred5 = gbm5.predict(X_test, raw_score=True)
gbm6 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=[])
pred6 = gbm6.predict(X_test, raw_score=True)
self.assertRaises(AssertionError,
np.testing.assert_allclose,
pred0, pred1)
self.assertRaises(AssertionError,
np.testing.assert_allclose,
pred0, pred2)
np.testing.assert_allclose(pred1, pred2)
np.testing.assert_allclose(pred0, pred3)
np.testing.assert_allclose(pred_prob, pred4)
self.assertRaises(AssertionError,
np.testing.assert_allclose,
pred0, pred5) # ordered cat features aren't treated as cat features by default
self.assertRaises(AssertionError,
np.testing.assert_allclose,
pred0, pred6)
self.assertListEqual(gbm0.booster_.pandas_categorical, cat_values)
self.assertListEqual(gbm1.booster_.pandas_categorical, cat_values)
self.assertListEqual(gbm2.booster_.pandas_categorical, cat_values)
self.assertListEqual(gbm3.booster_.pandas_categorical, cat_values)
self.assertListEqual(gbm4.pandas_categorical, cat_values)
self.assertListEqual(gbm5.booster_.pandas_categorical, cat_values)
self.assertListEqual(gbm6.booster_.pandas_categorical, cat_values)
@unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
def test_pandas_sparse(self):
import pandas as pd
X = pd.DataFrame({"A": pd.SparseArray(np.random.permutation([0, 1, 2] * 100)),
"B": pd.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),
"C": pd.SparseArray(np.random.permutation([True, False] * 150))})
y = pd.Series(pd.SparseArray(np.random.permutation([0, 1] * 150)))
X_test = pd.DataFrame({"A": pd.SparseArray(np.random.permutation([0, 2] * 30)),
"B": pd.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)),
"C": pd.SparseArray(np.random.permutation([True, False] * 30))})
if pd.__version__ >= '0.24.0':
for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):
self.assertTrue(pd.api.types.is_sparse(dtype))
gbm = lgb.sklearn.LGBMClassifier().fit(X, y)
pred_sparse = gbm.predict(X_test, raw_score=True)
if hasattr(X_test, 'sparse'):
pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)
else:
pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)
np.testing.assert_allclose(pred_sparse, pred_dense)
def test_predict(self):
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
gbm = lgb.train({'objective': 'multiclass',
'num_class': 3,
'verbose': -1},
lgb.Dataset(X_train, y_train))
clf = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train)
# Tests same probabilities
res_engine = gbm.predict(X_test)
res_sklearn = clf.predict_proba(X_test)
np.testing.assert_allclose(res_engine, res_sklearn)
# Tests same predictions
res_engine = np.argmax(gbm.predict(X_test), axis=1)
res_sklearn = clf.predict(X_test)
np.testing.assert_equal(res_engine, res_sklearn)
# Tests same raw scores
res_engine = gbm.predict(X_test, raw_score=True)
res_sklearn = clf.predict(X_test, raw_score=True)
np.testing.assert_allclose(res_engine, res_sklearn)
# Tests same leaf indices
res_engine = gbm.predict(X_test, pred_leaf=True)
res_sklearn = clf.predict(X_test, pred_leaf=True)
np.testing.assert_equal(res_engine, res_sklearn)
# Tests same feature contributions
res_engine = gbm.predict(X_test, pred_contrib=True)
res_sklearn = clf.predict(X_test, pred_contrib=True)
np.testing.assert_allclose(res_engine, res_sklearn)
# Tests other parameters for the prediction works
res_engine = gbm.predict(X_test)
res_sklearn_params = clf.predict_proba(X_test,
pred_early_stop=True,
pred_early_stop_margin=1.0)
self.assertRaises(AssertionError,
np.testing.assert_allclose,
res_engine, res_sklearn_params)
def test_evaluate_train_set(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=10, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=False)
self.assertEqual(len(gbm.evals_result_), 2)
self.assertIn('training', gbm.evals_result_)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('valid_1', gbm.evals_result_)
self.assertEqual(len(gbm.evals_result_['valid_1']), 1)
self.assertIn('l2', gbm.evals_result_['valid_1'])
def test_metrics(self):
def custom_obj(y_true, y_pred):
return np.zeros(y_true.shape), np.zeros(y_true.shape)
def custom_metric(y_true, y_pred):
return 'error', 0, False
X, y = load_boston(True)
params = {'n_estimators': 5, 'verbose': -1}
params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False}
# no custom objective, no custom metric
# default metric
gbm = lgb.LGBMRegressor(**params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('l2', gbm.evals_result_['training'])
# non-default metric
gbm = lgb.LGBMRegressor(metric='mape', **params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('mape', gbm.evals_result_['training'])
# no metric
gbm = lgb.LGBMRegressor(metric='None', **params).fit(**params_fit)
self.assertIs(gbm.evals_result_, None)
# non-default metric in eval_metric
gbm = lgb.LGBMRegressor(**params).fit(eval_metric='mape', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# non-default metric with non-default metric in eval_metric
gbm = lgb.LGBMRegressor(metric='gamma', **params).fit(eval_metric='mape', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# non-default metric with multiple metrics in eval_metric
gbm = lgb.LGBMRegressor(metric='gamma',
**params).fit(eval_metric=['l2', 'mape'], **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# default metric for non-default objective
gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('l1', gbm.evals_result_['training'])
# non-default metric for non-default objective
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape',
**params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('mape', gbm.evals_result_['training'])
# no metric
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None',
**params).fit(**params_fit)
self.assertIs(gbm.evals_result_, None)
# non-default metric in eval_metric for non-default objective
gbm = lgb.LGBMRegressor(objective='regression_l1',
**params).fit(eval_metric='mape', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# non-default metric with non-default metric in eval_metric for non-default objective
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma',
**params).fit(eval_metric='mape', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# non-default metric with multiple metrics in eval_metric for non-default objective
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma',
**params).fit(eval_metric=['l2', 'mape'], **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# custom objective, no custom metric
# default regression metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, **params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('l2', gbm.evals_result_['training'])
# non-default regression metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric='mape', **params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('mape', gbm.evals_result_['training'])
# multiple regression metrics for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric=['l1', 'gamma'],
**params).fit(**params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
# no metric
gbm = lgb.LGBMRegressor(objective=custom_obj, metric='None',
**params).fit(**params_fit)
self.assertIs(gbm.evals_result_, None)
# default regression metric with non-default metric in eval_metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj,
**params).fit(eval_metric='mape', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# non-default regression metric with metric in eval_metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric='mape',
**params).fit(eval_metric='gamma', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('mape', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
# multiple regression metrics with metric in eval_metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric=['l1', 'gamma'],
**params).fit(eval_metric='l2', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('l2', gbm.evals_result_['training'])
# multiple regression metrics with multiple metrics in eval_metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric=['l1', 'gamma'],
**params).fit(eval_metric=['l2', 'mape'], **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 4)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
# no custom objective, custom metric
# default metric with custom metric
gbm = lgb.LGBMRegressor(**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# non-default metric with custom metric
gbm = lgb.LGBMRegressor(metric='mape',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('mape', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# multiple metrics with custom metric
gbm = lgb.LGBMRegressor(metric=['l1', 'gamma'],
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# custom metric (disable default metric)
gbm = lgb.LGBMRegressor(metric='None',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('error', gbm.evals_result_['training'])
# default metric for non-default objective with custom metric
gbm = lgb.LGBMRegressor(objective='regression_l1',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# non-default metric for non-default objective with custom metric
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('mape', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# multiple metrics for non-default objective with custom metric
gbm = lgb.LGBMRegressor(objective='regression_l1', metric=['l1', 'gamma'],
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('l1', gbm.evals_result_['training'])
self.assertIn('gamma', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# custom metric (disable default metric for non-default objective)
gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('error', gbm.evals_result_['training'])
# custom objective, custom metric
# custom metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj,
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('error', gbm.evals_result_['training'])
# non-default regression metric with custom metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric='mape',
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('mape', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
# multiple regression metrics with custom metric for custom objective
gbm = lgb.LGBMRegressor(objective=custom_obj, metric=['l2', 'mape'],
**params).fit(eval_metric=custom_metric, **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 3)
self.assertIn('l2', gbm.evals_result_['training'])
self.assertIn('mape', gbm.evals_result_['training'])
self.assertIn('error', gbm.evals_result_['training'])
X, y = load_digits(3, True)
params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False}
# default metric and invalid binary metric is replaced with multiclass alternative
gbm = lgb.LGBMClassifier(**params).fit(eval_metric='binary_error', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('multi_logloss', gbm.evals_result_['training'])
self.assertIn('multi_error', gbm.evals_result_['training'])
# invalid objective is replaced with default multiclass one
# and invalid binary metric is replaced with multiclass alternative
gbm = lgb.LGBMClassifier(objective='invalid_obj',
**params).fit(eval_metric='binary_error', **params_fit)
self.assertEqual(gbm.objective_, 'multiclass')
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('multi_logloss', gbm.evals_result_['training'])
self.assertIn('multi_error', gbm.evals_result_['training'])
# default metric for non-default multiclass objective
# and invalid binary metric is replaced with multiclass alternative
gbm = lgb.LGBMClassifier(objective='ovr',
**params).fit(eval_metric='binary_error', **params_fit)
self.assertEqual(gbm.objective_, 'ovr')
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('multi_logloss', gbm.evals_result_['training'])
self.assertIn('multi_error', gbm.evals_result_['training'])
X, y = load_digits(2, True)
params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False}
# default metric and invalid multiclass metric is replaced with binary alternative
gbm = lgb.LGBMClassifier(**params).fit(eval_metric='multi_error', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 2)
self.assertIn('binary_logloss', gbm.evals_result_['training'])
self.assertIn('binary_error', gbm.evals_result_['training'])
# invalid multiclass metric is replaced with binary alternative for custom objective
gbm = lgb.LGBMClassifier(objective=custom_obj,
**params).fit(eval_metric='multi_logloss', **params_fit)
self.assertEqual(len(gbm.evals_result_['training']), 1)
self.assertIn('binary_logloss', gbm.evals_result_['training'])
def test_inf_handle(self):
nrows = 1000
ncols = 10
X = np.random.randn(nrows, ncols)
y = np.random.randn(nrows) + np.full(nrows, 1e30)
weight = np.full(nrows, 1e10)
params = {'n_estimators': 20, 'verbose': -1}
params_fit = {'X': X, 'y': y, 'sample_weight': weight, 'eval_set': (X, y),
'verbose': False, 'early_stopping_rounds': 5}
gbm = lgb.LGBMRegressor(**params).fit(**params_fit)
np.testing.assert_allclose(gbm.evals_result_['training']['l2'], np.inf)
def test_nan_handle(self):
nrows = 1000
ncols = 10
X = np.random.randn(nrows, ncols)
y = np.random.randn(nrows) + np.full(nrows, 1e30)
weight = np.zeros(nrows)
params = {'n_estimators': 20, 'verbose': -1}
params_fit = {'X': X, 'y': y, 'sample_weight': weight, 'eval_set': (X, y),
'verbose': False, 'early_stopping_rounds': 5}
gbm = lgb.LGBMRegressor(**params).fit(**params_fit)
np.testing.assert_allclose(gbm.evals_result_['training']['l2'], np.nan)
def test_class_weight(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
y_train_str = y_train.astype('str')
y_test_str = y_test.astype('str')
gbm = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True)
gbm.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test), (X_test, y_test),
(X_test, y_test), (X_test, y_test)],
eval_class_weight=['balanced', None, 'balanced', {1: 10, 4: 20}, {5: 30, 2: 40}],
verbose=False)
for eval_set1, eval_set2 in itertools.combinations(gbm.evals_result_.keys(), 2):
for metric in gbm.evals_result_[eval_set1]:
np.testing.assert_raises(AssertionError,
np.testing.assert_allclose,
gbm.evals_result_[eval_set1][metric],
gbm.evals_result_[eval_set2][metric])
gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True)
gbm_str.fit(X_train, y_train_str,
eval_set=[(X_train, y_train_str), (X_test, y_test_str),
(X_test, y_test_str), (X_test, y_test_str), (X_test, y_test_str)],
eval_class_weight=['balanced', None, 'balanced', {'1': 10, '4': 20}, {'5': 30, '2': 40}],
verbose=False)
for eval_set1, eval_set2 in itertools.combinations(gbm_str.evals_result_.keys(), 2):
for metric in gbm_str.evals_result_[eval_set1]:
np.testing.assert_raises(AssertionError,
np.testing.assert_allclose,
gbm_str.evals_result_[eval_set1][metric],
gbm_str.evals_result_[eval_set2][metric])
for eval_set in gbm.evals_result_:
for metric in gbm.evals_result_[eval_set]:
np.testing.assert_allclose(gbm.evals_result_[eval_set][metric],
gbm_str.evals_result_[eval_set][metric])
| 1 | 20,943 | Can we make these asserts stricter/more precise? I remember this test helped spot the issue of inconsistent results on different platforms due to using `sort` instead of `stable_sort`. | microsoft-LightGBM | cpp
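One way to tighten the lambdarank assertions from the patch above, sketched as a drop-in fragment for the test body; the 0.65 targets and the 0.05 tolerance are placeholders, not measured scores:

```python
# Sketch only: pin each score inside a small window instead of a loose
# one-sided bound, so platform-dependent regressions (e.g. sort vs.
# stable_sort) push the value outside the window and fail the test.
self.assertLessEqual(gbm.best_iteration_, 25)
self.assertAlmostEqual(gbm.best_score_['valid_0']['ndcg@1'], 0.65, delta=0.05)
self.assertAlmostEqual(gbm.best_score_['valid_0']['ndcg@3'], 0.65, delta=0.05)
```

Two-sided checks like these catch both regressions and suspicious improvements, at the cost of having to re-pin the reference values when the training defaults change.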
@@ -87,6 +87,10 @@ module Bolt
impl
end
+ def select_interpreter(executable, interpreters)
+ interpreters[Pathname(executable).extname] if interpreters
+ end
+
def reject_transport_options(target, options)
if target.options['run-as']
options.reject { |k, _v| k == '_run_as' } | 1 | # frozen_string_literal: true
require 'logging'
require 'bolt/result'
module Bolt
module Transport
# This class provides the default behavior for Transports. A Transport is
# responsible for uploading files and running commands, scripts, and tasks
# on Targets.
#
# Bolt executes work on the Transport in "batches". To do that, it calls
# the batches() method, which is responsible for dividing the list of
# Targets into batches according to how it wants to handle them. It will
# then call Transport#batch_task, or the corresponding method for another
# operation, passing a list of Targets. The Transport returns a list of
# Bolt::Result objects, one per Target. Each batch is executed on a
# separate thread, controlled by the `concurrency` setting, so many batches
# may be running in parallel.
#
# The default batch implementation splits the list of Targets into batches
# of 1. It then calls run_task(), or a corresponding method for other
# operations, passing in the single Target.
#
# Most Transport implementations, like the SSH and WinRM transports, don't
# need to do their own batching, since they only operate on a single Target
# at a time. Those Transports can implement the run_task() and related
# methods, which will automatically handle running many Targets in
# parallel, and will handle publishing start and finish events for each
# Target.
#
# Transports that need their own batching, like the Orch transport, can
# instead override the batches() method to split Targets into sets that can
# be executed together, and override the batch_task() and related methods
# to execute a batch of nodes. In that case, those Transports should accept
# a block argument and call it with a :node_start event for each Target
# before executing, and a :node_result event for each Target after
# execution.
class Base
STDIN_METHODS = %w[both stdin].freeze
ENVIRONMENT_METHODS = %w[both environment].freeze
attr_reader :logger
# Returns options this transport supports
def self.options
raise NotImplementedError,
"self.options() or self.filter_options(unfiltered) must be implemented by the transport class"
end
def self.filter_options(unfiltered)
unfiltered.select { |k| options.include?(k) }
end
def self.validate(_options)
raise NotImplementedError, "self.validate() must be implemented by the transport class"
end
def initialize
@logger = Logging.logger[self]
end
def with_events(target, callback)
callback&.call(type: :node_start, target: target)
result = begin
yield
rescue StandardError, NotImplementedError => ex
Bolt::Result.from_exception(target, ex)
end
callback&.call(type: :node_result, result: result)
result
end
def provided_features
[]
end
def default_input_method(_executable)
'both'
end
def select_implementation(target, task)
impl = task.select_implementation(target, provided_features)
impl['input_method'] ||= default_input_method(impl['path'])
impl
end
def reject_transport_options(target, options)
if target.options['run-as']
options.reject { |k, _v| k == '_run_as' }
else
options
end
end
private :reject_transport_options
# Transform a parameter map to an environment variable map, with parameter names prefixed
# with 'PT_' and values transformed to JSON unless they're strings.
def envify_params(params)
params.each_with_object({}) do |(k, v), h|
v = v.to_json unless v.is_a?(String)
h["PT_#{k}"] = v
end
end
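# A small illustrative sketch (hypothetical values, not from Bolt itself):
# string values pass through unchanged, everything else is JSON-encoded.
#
#   envify_params('message' => 'hi', 'count' => 3)
#   # => { 'PT_message' => 'hi', 'PT_count' => '3' }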
# Raises an error if more than one target was given in the batch.
#
# The default implementations of batch_* strictly assume the transport is
# using the default batch size of 1. This method ensures that is the
# case and raises an error if it's not.
def assert_batch_size_one(method, targets)
if targets.length > 1
message = "#{self.class.name} must implement #{method} to support batches (got #{targets.length} nodes)"
raise NotImplementedError, message
end
end
# Runs the given task on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_task(targets, task, arguments, options = {}, &callback)
assert_batch_size_one("batch_task()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running task run '#{task}' on #{target.uri}" }
run_task(target, task, arguments, reject_transport_options(target, options))
end
end
# Runs the given command on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_command(targets, command, options = {}, &callback)
assert_batch_size_one("batch_command()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug("Running command '#{command}' on #{target.uri}")
run_command(target, command, reject_transport_options(target, options))
end
end
# Runs the given script on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_script(targets, script, arguments, options = {}, &callback)
assert_batch_size_one("batch_script()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running script '#{script}' on #{target.uri}" }
run_script(target, script, arguments, reject_transport_options(target, options))
end
end
# Uploads the given source file to the destination location on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_upload(targets, source, destination, options = {}, &callback)
assert_batch_size_one("batch_upload()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Uploading: '#{source}' to #{destination} on #{target.uri}" }
upload(target, source, destination, reject_transport_options(target, options))
end
end
def batch_connected?(targets)
assert_batch_size_one("connected?()", targets)
connected?(targets.first)
end
# Split the given list of targets into a list of batches. The default
# implementation returns single-node batches.
#
# Transports may override this method, and the corresponding batch_*
# methods, to implement their own batch processing.
def batches(targets)
targets.map { |target| [target] }
end
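# A minimal sketch of a custom batching strategy (illustrative only; the
# 'batch-group' option is hypothetical, not a real Bolt option): a transport
# that groups Targets so each group can be dispatched in one remote call
# might override batches() like this:
#
#   def batches(targets)
#     targets.group_by { |t| t.options['batch-group'] }.values
#   end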
# Transports should override this method with their own implementation of running a command.
def run_command(*_args)
raise NotImplementedError, "run_command() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a script.
def run_script(*_args)
raise NotImplementedError, "run_script() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a task.
def run_task(*_args)
raise NotImplementedError, "run_task() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of file upload.
def upload(*_args)
raise NotImplementedError, "upload() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of a connection test.
def connected?(_targets)
raise NotImplementedError, "connected?() must be implemented by the transport class"
end
# Unwraps any Sensitive data in an arguments Hash, so the plain-text is passed
# to the Task/Script.
#
# This works on deeply nested data structures composed of Hashes, Arrays,
# and plain-old data types (int, string, etc.).
def unwrap_sensitive_args(arguments)
# Skip this if Puppet isn't loaded
return arguments unless defined?(Puppet::Pops::Types::PSensitiveType::Sensitive)
case arguments
when Array
# iterate over the array, unwrapping all elements
arguments.map { |x| unwrap_sensitive_args(x) }
when Hash
# iterate over the arguments hash and unwrap all keys and values
arguments.each_with_object({}) { |(k, v), h|
h[unwrap_sensitive_args(k)] = unwrap_sensitive_args(v)
}
when Puppet::Pops::Types::PSensitiveType::Sensitive
# this value is Sensitive, unwrap it
unwrap_sensitive_args(arguments.unwrap)
else
# unknown data type, just return it
arguments
end
end
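# An illustrative sketch (hypothetical values; assumes Puppet is loaded):
#
#   secret = Puppet::Pops::Types::PSensitiveType::Sensitive.new('s3cret')
#   unwrap_sensitive_args('password' => secret, 'user' => 'admin')
#   # => { 'password' => 's3cret', 'user' => 'admin' }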
end
end
end
| 1 | 10,650 | I thought about adjusting for case here as well? For example `.rb,rb,.RB,.rb` would all map to ruby interpreter specified? Is that too much "magic"? Is there a sane workflow that someone would choose to map `.rb` to one interpreter and `.RB` to another? | puppetlabs-bolt | rb |
@@ -1235,12 +1235,15 @@ ostree_sysroot_get_subbootversion (OstreeSysroot *self)
* ostree_sysroot_get_booted_deployment:
* @self: Sysroot
*
+ * This function may only be called if the sysroot is loaded.
+ *
* Returns: (transfer none) (nullable): The currently booted deployment, or %NULL if none
*/
OstreeDeployment *
ostree_sysroot_get_booted_deployment (OstreeSysroot *self)
{
- g_return_val_if_fail (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
+ g_assert (self);
+ g_assert (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED);
return self->booted_deployment;
} | 1 | /*
* Copyright (C) 2013 Colin Walters <walters@verbum.org>
*
* SPDX-License-Identifier: LGPL-2.0+
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include "otutil.h"
#include <sys/file.h>
#include <sys/mount.h>
#include <err.h>
#include <sys/wait.h>
#include "ostree.h"
#include "ostree-core-private.h"
#include "ostree-repo-private.h"
#include "ostree-sepolicy-private.h"
#include "ostree-sysroot-private.h"
#include "ostree-deployment-private.h"
#include "ostree-bootloader-uboot.h"
#include "ostree-bootloader-syslinux.h"
#include "ostree-bootloader-grub2.h"
#include "ostree-bootloader-zipl.h"
/**
* SECTION:ostree-sysroot
* @title: Root partition mount point
* @short_description: Manage physical root filesystem
*
* A #OstreeSysroot object represents a physical root filesystem,
* which in particular should contain a toplevel /ostree directory.
* Inside this directory is an #OstreeRepo in /ostree/repo, plus a set
* of deployments in /ostree/deploy.
*
* This class is not by default safe against concurrent use by threads
* or external processes. You can use ostree_sysroot_lock() to
* perform locking externally.
*/
typedef struct {
GObjectClass parent_class;
/* Signals */
void (*journal_msg) (OstreeSysroot *sysroot,
const char *msg);
} OstreeSysrootClass;
enum {
JOURNAL_MSG_SIGNAL,
LAST_SIGNAL,
};
static guint signals[LAST_SIGNAL] = { 0 };
enum {
PROP_0,
PROP_PATH
};
G_DEFINE_TYPE (OstreeSysroot, ostree_sysroot, G_TYPE_OBJECT)
static void
ostree_sysroot_finalize (GObject *object)
{
OstreeSysroot *self = OSTREE_SYSROOT (object);
g_clear_object (&self->path);
g_clear_object (&self->repo);
g_clear_pointer (&self->deployments, g_ptr_array_unref);
g_clear_object (&self->booted_deployment);
g_clear_object (&self->staged_deployment);
g_clear_pointer (&self->staged_deployment_data, (GDestroyNotify)g_variant_unref);
glnx_release_lock_file (&self->lock);
ostree_sysroot_unload (self);
G_OBJECT_CLASS (ostree_sysroot_parent_class)->finalize (object);
}
static void
ostree_sysroot_set_property(GObject *object,
guint prop_id,
const GValue *value,
GParamSpec *pspec)
{
OstreeSysroot *self = OSTREE_SYSROOT (object);
switch (prop_id)
{
case PROP_PATH:
self->path = g_value_dup_object (value);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
ostree_sysroot_get_property(GObject *object,
guint prop_id,
GValue *value,
GParamSpec *pspec)
{
OstreeSysroot *self = OSTREE_SYSROOT (object);
switch (prop_id)
{
case PROP_PATH:
g_value_set_object (value, self->path);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
ostree_sysroot_constructed (GObject *object)
{
OstreeSysroot *self = OSTREE_SYSROOT (object);
/* Ensure the system root path is set. */
if (self->path == NULL)
self->path = g_object_ref (_ostree_get_default_sysroot_path ());
G_OBJECT_CLASS (ostree_sysroot_parent_class)->constructed (object);
}
static void
ostree_sysroot_class_init (OstreeSysrootClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->constructed = ostree_sysroot_constructed;
object_class->get_property = ostree_sysroot_get_property;
object_class->set_property = ostree_sysroot_set_property;
object_class->finalize = ostree_sysroot_finalize;
g_object_class_install_property (object_class,
PROP_PATH,
g_param_spec_object ("path",
"",
"",
G_TYPE_FILE,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY));
/**
* OstreeSysroot::journal-msg:
* @self: Self
* @msg: Human-readable string (should not contain newlines)
*
* libostree will log to the journal various events, such as the /etc merge
* status, and transaction completion. Connect to this signal to also
* synchronously receive the text for those messages. This is intended to be
* used by command line tools which link to libostree as a library.
*
* Currently, the structured data is only available via the systemd journal.
*
* Since: 2017.10
*/
signals[JOURNAL_MSG_SIGNAL] =
g_signal_new ("journal-msg",
G_OBJECT_CLASS_TYPE (object_class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (OstreeSysrootClass, journal_msg),
NULL, NULL, NULL, G_TYPE_NONE, 1, G_TYPE_STRING);
}
static void
ostree_sysroot_init (OstreeSysroot *self)
{
const GDebugKey keys[] = {
{ "mutable-deployments", OSTREE_SYSROOT_DEBUG_MUTABLE_DEPLOYMENTS },
{ "test-fifreeze", OSTREE_SYSROOT_DEBUG_TEST_FIFREEZE },
{ "no-xattrs", OSTREE_SYSROOT_DEBUG_NO_XATTRS },
{ "no-dtb", OSTREE_SYSROOT_DEBUG_TEST_NO_DTB },
};
self->debug_flags = g_parse_debug_string (g_getenv ("OSTREE_SYSROOT_DEBUG"),
keys, G_N_ELEMENTS (keys));
self->sysroot_fd = -1;
self->boot_fd = -1;
}
/**
* ostree_sysroot_new:
* @path: (allow-none): Path to a system root directory, or %NULL to use the
* current visible root file system
*
* Create a new #OstreeSysroot object for the sysroot at @path. If @path is %NULL,
* the current visible root file system is used, equivalent to
* ostree_sysroot_new_default().
*
* Returns: (transfer full): An accessor object for an system root located at @path
*/
OstreeSysroot*
ostree_sysroot_new (GFile *path)
{
return g_object_new (OSTREE_TYPE_SYSROOT, "path", path, NULL);
}
/**
* ostree_sysroot_new_default:
*
* Returns: (transfer full): An accessor for the current visible root / filesystem
*/
OstreeSysroot*
ostree_sysroot_new_default (void)
{
return ostree_sysroot_new (NULL);
}
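/* A minimal usage sketch (illustrative, not part of libostree itself):
* create a sysroot object for the booted system, load it, and inspect the
* deployments. Error handling is condensed; `cancellable` and `error` are
* assumed to be in scope.
*
*   g_autoptr(OstreeSysroot) sysroot = ostree_sysroot_new_default ();
*   if (!ostree_sysroot_load (sysroot, cancellable, error))
*     return FALSE;
*   g_autoptr(GPtrArray) deployments = ostree_sysroot_get_deployments (sysroot);
*/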
/**
* ostree_sysroot_set_mount_namespace_in_use:
*
* If this function is invoked, then libostree will assume that
* a private Linux mount namespace has been created by the process.
* The primary use case for this is to have e.g. /sysroot mounted
* read-only by default.
*
* If this function has been called, then when a function which requires
* writable access is invoked, libostree will automatically remount as writable
* any mount points on which it operates. This currently is just `/sysroot` and
* `/boot`.
*
* If you invoke this function, it must be before ostree_sysroot_load(); it may
* be invoked before or after ostree_sysroot_initialize().
*
* Since: 2020.1
*/
void
ostree_sysroot_set_mount_namespace_in_use (OstreeSysroot *self)
{
/* Must be before we're loaded, as otherwise we'd have to close/reopen all our
fds, e.g. the repo */
g_return_if_fail (self->loadstate < OSTREE_SYSROOT_LOAD_STATE_LOADED);
self->mount_namespace_in_use = TRUE;
}
/**
* ostree_sysroot_get_path:
* @self: Sysroot
*
* Returns: (transfer none) (not nullable): Path to rootfs
*/
GFile *
ostree_sysroot_get_path (OstreeSysroot *self)
{
return self->path;
}
/* Open a directory file descriptor for the sysroot if we haven't yet */
static gboolean
ensure_sysroot_fd (OstreeSysroot *self,
GError **error)
{
if (self->sysroot_fd == -1)
{
if (!glnx_opendirat (AT_FDCWD, gs_file_get_path_cached (self->path), TRUE,
&self->sysroot_fd, error))
return FALSE;
}
return TRUE;
}
gboolean
_ostree_sysroot_ensure_boot_fd (OstreeSysroot *self, GError **error)
{
if (self->boot_fd == -1)
{
if (!glnx_opendirat (self->sysroot_fd, "boot", TRUE,
&self->boot_fd, error))
return FALSE;
}
return TRUE;
}
static gboolean
remount_writable (const char *path, gboolean *did_remount, GError **error)
{
*did_remount = FALSE;
struct statvfs stvfsbuf;
if (statvfs (path, &stvfsbuf) < 0)
{
if (errno != ENOENT)
return glnx_throw_errno_prefix (error, "statvfs(%s)", path);
else
return TRUE;
}
if ((stvfsbuf.f_flag & ST_RDONLY) != 0)
{
/* OK, let's remount writable. */
if (mount (path, path, NULL, MS_REMOUNT | MS_RELATIME, "") < 0)
return glnx_throw_errno_prefix (error, "Remounting %s read-write", path);
*did_remount = TRUE;
g_debug ("remounted %s writable", path);
}
return TRUE;
}
/* Remount /sysroot read-write if necessary */
gboolean
_ostree_sysroot_ensure_writable (OstreeSysroot *self,
GError **error)
{
/* Do nothing if no mount namespace is in use */
if (!self->mount_namespace_in_use)
return TRUE;
/* If a mount namespace is in use, ensure we're initialized */
if (!ostree_sysroot_initialize (self, error))
return FALSE;
/* If we aren't operating on a booted system, then we don't
* do anything with mounts.
*/
if (!self->root_is_ostree_booted)
return TRUE;
/* In these cases we also require /boot */
if (!_ostree_sysroot_ensure_boot_fd (self, error))
return FALSE;
gboolean did_remount_sysroot = FALSE;
if (!remount_writable ("/sysroot", &did_remount_sysroot, error))
return FALSE;
gboolean did_remount_boot = FALSE;
if (!remount_writable ("/boot", &did_remount_boot, error))
return FALSE;
/* Now close and reopen our file descriptors */
ostree_sysroot_unload (self);
if (!ensure_sysroot_fd (self, error))
return FALSE;
return TRUE;
}
/**
* ostree_sysroot_get_fd:
* @self: Sysroot
*
* Access a file descriptor that refers to the root directory of this sysroot.
* ostree_sysroot_initialize() (or ostree_sysroot_load()) must have been invoked
* prior to calling this function.
*
* Returns: A file descriptor valid for the lifetime of @self
*/
int
ostree_sysroot_get_fd (OstreeSysroot *self)
{
g_return_val_if_fail (self->sysroot_fd != -1, -1);
return self->sysroot_fd;
}
/**
* ostree_sysroot_is_booted:
* @self: Sysroot
*
* Can only be invoked after `ostree_sysroot_initialize()`.
*
* Returns: %TRUE iff the sysroot points to a booted deployment
* Since: 2020.1
*/
gboolean
ostree_sysroot_is_booted (OstreeSysroot *self)
{
g_return_val_if_fail (self->loadstate >= OSTREE_SYSROOT_LOAD_STATE_INIT, FALSE);
return self->root_is_ostree_booted;
}
gboolean
_ostree_sysroot_bump_mtime (OstreeSysroot *self,
GError **error)
{
/* Allow other systems to monitor for changes */
if (utimensat (self->sysroot_fd, "ostree/deploy", NULL, 0) < 0)
{
glnx_set_prefix_error_from_errno (error, "%s", "utimensat");
return FALSE;
}
return TRUE;
}
/**
* ostree_sysroot_unload:
* @self: Sysroot
*
* Release any resources such as file descriptors referring to the
* root directory of this sysroot. Normally, those resources are
* cleared by finalization, but in garbage collected languages that
* may not be predictable.
*
* This undoes the effect of `ostree_sysroot_load()`.
*/
void
ostree_sysroot_unload (OstreeSysroot *self)
{
glnx_close_fd (&self->sysroot_fd);
glnx_close_fd (&self->boot_fd);
}
/**
* ostree_sysroot_ensure_initialized:
* @self: Sysroot
* @cancellable: Cancellable
* @error: Error
*
* Ensure that @self is set up as a valid rootfs, by creating
* /ostree/repo, among other things.
*/
gboolean
ostree_sysroot_ensure_initialized (OstreeSysroot *self,
GCancellable *cancellable,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
if (!glnx_shutil_mkdir_p_at (self->sysroot_fd, "ostree/repo", 0755,
cancellable, error))
return FALSE;
if (!glnx_shutil_mkdir_p_at (self->sysroot_fd, "ostree/deploy", 0755,
cancellable, error))
return FALSE;
g_autoptr(OstreeRepo) repo =
ostree_repo_create_at (self->sysroot_fd, "ostree/repo",
OSTREE_REPO_MODE_BARE, NULL,
cancellable, error);
if (!repo)
return FALSE;
return TRUE;
}
void
_ostree_sysroot_emit_journal_msg (OstreeSysroot *self,
const char *msg)
{
g_signal_emit (self, signals[JOURNAL_MSG_SIGNAL], 0, msg);
}
gboolean
_ostree_sysroot_parse_deploy_path_name (const char *name,
char **out_csum,
int *out_serial,
GError **error)
{
static gsize regex_initialized;
static GRegex *regex;
if (g_once_init_enter (®ex_initialized))
{
regex = g_regex_new ("^([0-9a-f]+)\\.([0-9]+)$", 0, 0, NULL);
g_assert (regex);
g_once_init_leave (®ex_initialized, 1);
}
g_autoptr(GMatchInfo) match = NULL;
if (!g_regex_match (regex, name, 0, &match))
return glnx_throw (error, "Invalid deploy name '%s', expected CHECKSUM.TREESERIAL", name);
g_autofree char *serial_str = g_match_info_fetch (match, 2);
*out_csum = g_match_info_fetch (match, 1);
*out_serial = (int)g_ascii_strtoll (serial_str, NULL, 10);
return TRUE;
}
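/* Illustrative example: a deploy directory named "ab12cd.0" (hex checksum,
* then ".", then a tree serial; real checksums are full 64-character SHA256
* strings) parses to out_csum="ab12cd" and out_serial=0.
*/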
/* For a given bootversion, get its subbootversion from `/ostree/boot.$bootversion`. */
gboolean
_ostree_sysroot_read_current_subbootversion (OstreeSysroot *self,
int bootversion,
int *out_subbootversion,
GCancellable *cancellable,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
g_autofree char *ostree_bootdir_name = g_strdup_printf ("ostree/boot.%d", bootversion);
struct stat stbuf;
if (!glnx_fstatat_allow_noent (self->sysroot_fd, ostree_bootdir_name, &stbuf, AT_SYMLINK_NOFOLLOW, error))
return FALSE;
if (errno == ENOENT)
{
g_debug ("Didn't find $sysroot/ostree/boot.%d symlink; assuming subbootversion 0", bootversion);
*out_subbootversion = 0;
}
else
{
g_autofree char *current_subbootdir_name =
glnx_readlinkat_malloc (self->sysroot_fd, ostree_bootdir_name,
cancellable, error);
if (!current_subbootdir_name)
return FALSE;
if (g_str_has_suffix (current_subbootdir_name, ".0"))
*out_subbootversion = 0;
else if (g_str_has_suffix (current_subbootdir_name, ".1"))
*out_subbootversion = 1;
else
return glnx_throw (error, "Invalid target '%s' in %s",
current_subbootdir_name, ostree_bootdir_name);
}
return TRUE;
}
static gint
compare_boot_loader_configs (OstreeBootconfigParser *a,
OstreeBootconfigParser *b)
{
const char *a_version = ostree_bootconfig_parser_get (a, "version");
const char *b_version = ostree_bootconfig_parser_get (b, "version");
if (a_version && b_version)
{
int r = strverscmp (a_version, b_version);
/* Reverse */
return -r;
}
else if (a_version)
return -1;
else
return 1;
}
static int
compare_loader_configs_for_sorting (gconstpointer a_pp,
gconstpointer b_pp)
{
OstreeBootconfigParser *a = *((OstreeBootconfigParser**)a_pp);
OstreeBootconfigParser *b = *((OstreeBootconfigParser**)b_pp);
return compare_boot_loader_configs (a, b);
}
/* Read all the bootconfigs from `/boot/loader/`. */
gboolean
_ostree_sysroot_read_boot_loader_configs (OstreeSysroot *self,
int bootversion,
GPtrArray **out_loader_configs,
GCancellable *cancellable,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
g_autoptr(GPtrArray) ret_loader_configs =
g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);
g_autofree char *entries_path = g_strdup_printf ("boot/loader.%d/entries", bootversion);
gboolean entries_exists;
g_auto(GLnxDirFdIterator) dfd_iter = { 0, };
if (!ot_dfd_iter_init_allow_noent (self->sysroot_fd, entries_path,
&dfd_iter, &entries_exists, error))
return FALSE;
if (!entries_exists)
{
/* Note early return */
*out_loader_configs = g_steal_pointer (&ret_loader_configs);
return TRUE;
}
while (TRUE)
{
struct dirent *dent;
struct stat stbuf;
if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error))
return FALSE;
if (dent == NULL)
break;
if (!glnx_fstatat (dfd_iter.fd, dent->d_name, &stbuf, 0, error))
return FALSE;
if (g_str_has_prefix (dent->d_name, "ostree-") &&
g_str_has_suffix (dent->d_name, ".conf") &&
S_ISREG (stbuf.st_mode))
{
g_autoptr(OstreeBootconfigParser) config = ostree_bootconfig_parser_new ();
if (!ostree_bootconfig_parser_parse_at (config, dfd_iter.fd, dent->d_name, cancellable, error))
return glnx_prefix_error (error, "Parsing %s", dent->d_name);
g_ptr_array_add (ret_loader_configs, g_object_ref (config));
}
}
/* Callers expect us to give them a sorted array */
g_ptr_array_sort (ret_loader_configs, compare_loader_configs_for_sorting);
ot_transfer_out_value(out_loader_configs, &ret_loader_configs);
return TRUE;
}
/* Get the bootversion from the `/boot/loader` symlink. */
static gboolean
read_current_bootversion (OstreeSysroot *self,
int *out_bootversion,
GCancellable *cancellable,
GError **error)
{
int ret_bootversion;
struct stat stbuf;
if (!glnx_fstatat_allow_noent (self->sysroot_fd, "boot/loader", &stbuf, AT_SYMLINK_NOFOLLOW, error))
return FALSE;
if (errno == ENOENT)
{
g_debug ("Didn't find $sysroot/boot/loader symlink; assuming bootversion 0");
ret_bootversion = 0;
}
else
{
if (!S_ISLNK (stbuf.st_mode))
return glnx_throw (error, "Not a symbolic link: boot/loader");
g_autofree char *target =
glnx_readlinkat_malloc (self->sysroot_fd, "boot/loader", cancellable, error);
if (!target)
return FALSE;
if (g_strcmp0 (target, "loader.0") == 0)
ret_bootversion = 0;
else if (g_strcmp0 (target, "loader.1") == 0)
ret_bootversion = 1;
else
return glnx_throw (error, "Invalid target '%s' in boot/loader", target);
}
*out_bootversion = ret_bootversion;
return TRUE;
}
static gboolean
load_origin (OstreeSysroot *self,
OstreeDeployment *deployment,
GCancellable *cancellable,
GError **error)
{
g_autofree char *origin_path = ostree_deployment_get_origin_relpath (deployment);
glnx_autofd int fd = -1;
if (!ot_openat_ignore_enoent (self->sysroot_fd, origin_path, &fd, error))
return FALSE;
if (fd >= 0)
{
g_autofree char *origin_contents =
glnx_fd_readall_utf8 (fd, NULL, cancellable, error);
if (!origin_contents)
return FALSE;
g_autoptr(GKeyFile) origin = g_key_file_new ();
if (!g_key_file_load_from_data (origin, origin_contents, -1, 0, error))
return glnx_prefix_error (error, "Parsing %s", origin_path);
ostree_deployment_set_origin (deployment, origin);
}
return TRUE;
}
static gboolean
parse_bootlink (const char *bootlink,
int *out_entry_bootversion,
char **out_osname,
char **out_bootcsum,
int *out_treebootserial,
GError **error)
{
static gsize regex_initialized;
static GRegex *regex;
if (g_once_init_enter (®ex_initialized))
{
regex = g_regex_new ("^/ostree/boot.([01])/([^/]+)/([^/]+)/([0-9]+)$", 0, 0, NULL);
g_assert (regex);
g_once_init_leave (®ex_initialized, 1);
}
g_autoptr(GMatchInfo) match = NULL;
if (!g_regex_match (regex, bootlink, 0, &match))
return glnx_throw (error, "Invalid ostree= argument '%s', expected ostree=/ostree/boot.BOOTVERSION/OSNAME/BOOTCSUM/TREESERIAL", bootlink);
g_autofree char *bootversion_str = g_match_info_fetch (match, 1);
g_autofree char *treebootserial_str = g_match_info_fetch (match, 4);
*out_entry_bootversion = (int)g_ascii_strtoll (bootversion_str, NULL, 10);
*out_osname = g_match_info_fetch (match, 2);
*out_bootcsum = g_match_info_fetch (match, 3);
*out_treebootserial = (int)g_ascii_strtoll (treebootserial_str, NULL, 10);
return TRUE;
}
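/* Illustrative example (checksums abbreviated; real ones are full SHA256
* hex strings): an ostree= value of "/ostree/boot.1/fedora/ab12cd/0"
* parses to bootversion=1, osname="fedora", bootcsum="ab12cd",
* treebootserial=0.
*/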
char *
_ostree_sysroot_get_runstate_path (OstreeDeployment *deployment, const char *key)
{
return g_strdup_printf ("%s%s.%d/%s",
_OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_DIR,
ostree_deployment_get_csum (deployment),
ostree_deployment_get_deployserial (deployment),
key);
}
static gboolean
parse_deployment (OstreeSysroot *self,
const char *boot_link,
OstreeDeployment **out_deployment,
GCancellable *cancellable,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
int entry_boot_version;
g_autofree char *osname = NULL;
g_autofree char *bootcsum = NULL;
int treebootserial = -1;
if (!parse_bootlink (boot_link, &entry_boot_version,
&osname, &bootcsum, &treebootserial,
error))
return FALSE;
g_autofree char *errprefix =
g_strdup_printf ("Parsing deployment %s in stateroot '%s'", boot_link, osname);
GLNX_AUTO_PREFIX_ERROR(errprefix, error);
const char *relative_boot_link = boot_link;
if (*relative_boot_link == '/')
relative_boot_link++;
g_autofree char *treebootserial_target =
glnx_readlinkat_malloc (self->sysroot_fd, relative_boot_link,
cancellable, error);
if (!treebootserial_target)
return FALSE;
const char *deploy_basename = glnx_basename (treebootserial_target);
g_autofree char *treecsum = NULL;
int deployserial = -1;
if (!_ostree_sysroot_parse_deploy_path_name (deploy_basename,
&treecsum, &deployserial, error))
return FALSE;
glnx_autofd int deployment_dfd = -1;
if (!glnx_opendirat (self->sysroot_fd, relative_boot_link, TRUE,
&deployment_dfd, error))
return FALSE;
/* See if this is the booted deployment */
const gboolean looking_for_booted_deployment =
(self->root_is_ostree_booted && !self->booted_deployment);
gboolean is_booted_deployment = FALSE;
if (looking_for_booted_deployment)
{
struct stat stbuf;
if (!glnx_fstat (deployment_dfd, &stbuf, error))
return FALSE;
/* A bit ugly, we're assigning to a sysroot-owned variable from deep in
* this parsing code. But eh, if something fails the sysroot state can't
* be relied on anyways.
*/
is_booted_deployment = (stbuf.st_dev == self->root_device &&
stbuf.st_ino == self->root_inode);
}
g_autoptr(OstreeDeployment) ret_deployment
= ostree_deployment_new (-1, osname, treecsum, deployserial,
bootcsum, treebootserial);
if (!load_origin (self, ret_deployment, cancellable, error))
return FALSE;
ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_NONE;
g_autofree char *unlocked_development_path =
_ostree_sysroot_get_runstate_path (ret_deployment, _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_FLAG_DEVELOPMENT);
g_autofree char *unlocked_transient_path =
_ostree_sysroot_get_runstate_path (ret_deployment, _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_FLAG_TRANSIENT);
struct stat stbuf;
if (lstat (unlocked_development_path, &stbuf) == 0)
ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT;
else if (lstat (unlocked_transient_path, &stbuf) == 0)
ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_TRANSIENT;
else
{
GKeyFile *origin = ostree_deployment_get_origin (ret_deployment);
g_autofree char *existing_unlocked_state = origin ?
g_key_file_get_string (origin, "origin", "unlocked", NULL) : NULL;
if (g_strcmp0 (existing_unlocked_state, "hotfix") == 0)
{
ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX;
}
/* TODO: warn on unknown unlock types? */
}
g_debug ("Deployment %s.%d unlocked=%d", treecsum, deployserial, ret_deployment->unlocked);
if (is_booted_deployment)
self->booted_deployment = g_object_ref (ret_deployment);
if (out_deployment)
*out_deployment = g_steal_pointer (&ret_deployment);
return TRUE;
}
/* Given a bootloader config, return the value part of the ostree= kernel
* argument.
*/
static char *
get_ostree_kernel_arg_from_config (OstreeBootconfigParser *config)
{
const char *options = ostree_bootconfig_parser_get (config, "options");
if (!options)
return NULL;
g_auto(GStrv) opts = g_strsplit (options, " ", -1);
for (char **iter = opts; *iter; iter++)
{
const char *opt = *iter;
if (g_str_has_prefix (opt, "ostree="))
return g_strdup (opt + strlen ("ostree="));
}
return NULL;
}
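/* Illustrative example: given options "rw quiet ostree=/ostree/boot.0/os/ab12cd/0"
* (abbreviated checksum), this returns "/ostree/boot.0/os/ab12cd/0"; it
* returns NULL when no ostree= argument is present.
*/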
/* From a BLS config, use its ostree= karg to find the deployment it points to and add it to
* the inout_deployments array. */
static gboolean
list_deployments_process_one_boot_entry (OstreeSysroot *self,
OstreeBootconfigParser *config,
GPtrArray *inout_deployments,
GCancellable *cancellable,
GError **error)
{
g_autofree char *ostree_arg = get_ostree_kernel_arg_from_config (config);
if (ostree_arg == NULL)
return glnx_throw (error, "No ostree= kernel argument found");
g_autoptr(OstreeDeployment) deployment = NULL;
if (!parse_deployment (self, ostree_arg, &deployment,
cancellable, error))
return FALSE;
ostree_deployment_set_bootconfig (deployment, config);
char **overlay_initrds = ostree_bootconfig_parser_get_overlay_initrds (config);
g_autoptr(GPtrArray) initrds_chksums = NULL;
for (char **it = overlay_initrds; it && *it; it++)
{
const char *basename = glnx_basename (*it);
if (strlen (basename) != (_OSTREE_SHA256_STRING_LEN + strlen (".img")))
return glnx_throw (error, "Malformed overlay initrd filename: %s", basename);
if (!initrds_chksums) /* lazy init */
initrds_chksums = g_ptr_array_new_full (g_strv_length (overlay_initrds), g_free);
g_ptr_array_add (initrds_chksums, g_strndup (basename, _OSTREE_SHA256_STRING_LEN));
}
if (initrds_chksums)
{
g_ptr_array_add (initrds_chksums, NULL);
_ostree_deployment_set_overlay_initrds (deployment, (char**)initrds_chksums->pdata);
}
g_ptr_array_add (inout_deployments, g_object_ref (deployment));
return TRUE;
}
static gint
compare_deployments_by_boot_loader_version_reversed (gconstpointer a_pp,
gconstpointer b_pp)
{
OstreeDeployment *a = *((OstreeDeployment**)a_pp);
OstreeDeployment *b = *((OstreeDeployment**)b_pp);
OstreeBootconfigParser *a_bootconfig = ostree_deployment_get_bootconfig (a);
OstreeBootconfigParser *b_bootconfig = ostree_deployment_get_bootconfig (b);
/* Staged deployments are always first */
if (ostree_deployment_is_staged (a))
{
g_assert (!ostree_deployment_is_staged (b));
return -1;
}
else if (ostree_deployment_is_staged (b))
return 1;
return compare_boot_loader_configs (a_bootconfig, b_bootconfig);
}
/**
* ostree_sysroot_load:
* @self: Sysroot
* @cancellable: Cancellable
* @error: Error
*
* Load deployment list, bootversion, and subbootversion from the
* rootfs @self.
*/
gboolean
ostree_sysroot_load (OstreeSysroot *self,
GCancellable *cancellable,
GError **error)
{
return ostree_sysroot_load_if_changed (self, NULL, cancellable, error);
}
static gboolean
ensure_repo (OstreeSysroot *self,
GError **error)
{
if (self->repo != NULL)
return TRUE;
if (!ensure_sysroot_fd (self, error))
return FALSE;
self->repo = ostree_repo_open_at (self->sysroot_fd, "ostree/repo", NULL, error);
if (!self->repo)
return FALSE;
/* Flag it as having been created via ostree_sysroot_get_repo(), and hold a
* weak ref for the remote-add handling.
*/
g_weak_ref_init (&self->repo->sysroot, self);
self->repo->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_VIA_SYSROOT;
/* Reload the repo config in case any defaults depend on knowing if this is
* a system repo.
*/
if (!ostree_repo_reload_config (self->repo, NULL, error))
return FALSE;
return TRUE;
}
/**
* ostree_sysroot_initialize:
* @self: sysroot
*
* Subset of ostree_sysroot_load(); performs basic initialization. Notably, one
* can invoke `ostree_sysroot_get_fd()` after calling this function.
*
* It is not necessary to call this function if ostree_sysroot_load() is
* invoked.
*
* Since: 2020.1
*/
gboolean
ostree_sysroot_initialize (OstreeSysroot *self,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
if (self->loadstate < OSTREE_SYSROOT_LOAD_STATE_INIT)
{
/* Gather some global state. First, check for the global ostree-booted flag;
* we'll use it e.g. to sanity-check that we found a booted deployment.
* Second, we also find out whether sysroot == /.
*/
if (!glnx_fstatat_allow_noent (AT_FDCWD, "/run/ostree-booted", NULL, 0, error))
return FALSE;
const gboolean ostree_booted = (errno == 0);
{ struct stat root_stbuf;
if (!glnx_fstatat (AT_FDCWD, "/", &root_stbuf, 0, error))
return FALSE;
self->root_device = root_stbuf.st_dev;
self->root_inode = root_stbuf.st_ino;
}
struct stat self_stbuf;
if (!glnx_fstatat (AT_FDCWD, gs_file_get_path_cached (self->path), &self_stbuf, 0, error))
return FALSE;
const gboolean root_is_sysroot =
(self->root_device == self_stbuf.st_dev &&
self->root_inode == self_stbuf.st_ino);
self->root_is_ostree_booted = (ostree_booted && root_is_sysroot);
self->loadstate = OSTREE_SYSROOT_LOAD_STATE_INIT;
}
return TRUE;
}
/* Reload the staged deployment from the file in /run */
gboolean
_ostree_sysroot_reload_staged (OstreeSysroot *self,
GError **error)
{
GLNX_AUTO_PREFIX_ERROR ("Loading staged deployment", error);
if (!self->root_is_ostree_booted)
return TRUE; /* Note early return */
g_assert (self->booted_deployment);
g_clear_object (&self->staged_deployment);
g_clear_pointer (&self->staged_deployment_data, (GDestroyNotify)g_variant_unref);
/* Read the staged state from disk */
glnx_autofd int fd = -1;
if (!ot_openat_ignore_enoent (AT_FDCWD, _OSTREE_SYSROOT_RUNSTATE_STAGED, &fd, error))
return FALSE;
if (fd != -1)
{
g_autoptr(GBytes) contents = ot_fd_readall_or_mmap (fd, 0, error);
if (!contents)
return FALSE;
g_autoptr(GVariant) staged_deployment_data =
g_variant_new_from_bytes ((GVariantType*)"a{sv}", contents, TRUE);
g_autoptr(GVariantDict) staged_deployment_dict =
g_variant_dict_new (staged_deployment_data);
/* Parse it */
g_autoptr(GVariant) target = NULL;
g_autofree char **kargs = NULL;
g_autofree char **overlay_initrds = NULL;
g_variant_dict_lookup (staged_deployment_dict, "target", "@a{sv}", &target);
g_variant_dict_lookup (staged_deployment_dict, "kargs", "^a&s", &kargs);
g_variant_dict_lookup (staged_deployment_dict, "overlay-initrds", "^a&s", &overlay_initrds);
if (target)
{
g_autoptr(OstreeDeployment) staged =
_ostree_sysroot_deserialize_deployment_from_variant (target, error);
if (!staged)
return FALSE;
_ostree_deployment_set_bootconfig_from_kargs (staged, kargs);
if (!load_origin (self, staged, NULL, error))
return FALSE;
_ostree_deployment_set_overlay_initrds (staged, overlay_initrds);
self->staged_deployment = g_steal_pointer (&staged);
self->staged_deployment_data = g_steal_pointer (&staged_deployment_data);
/* We set this flag for ostree_deployment_is_staged() because that API
* doesn't have access to the sysroot, which currently has the
* canonical "staged_deployment" reference.
*/
self->staged_deployment->staged = TRUE;
}
}
return TRUE;
}
/* Loads the current bootversion, subbootversion, and deployments, starting from the
* bootloader configs which are the source of truth.
*/
static gboolean
sysroot_load_from_bootloader_configs (OstreeSysroot *self,
GCancellable *cancellable,
GError **error)
{
struct stat stbuf;
int bootversion = 0;
if (!read_current_bootversion (self, &bootversion, cancellable, error))
return FALSE;
int subbootversion = 0;
if (!_ostree_sysroot_read_current_subbootversion (self, bootversion, &subbootversion,
cancellable, error))
return FALSE;
g_autoptr(GPtrArray) boot_loader_configs = NULL;
if (!_ostree_sysroot_read_boot_loader_configs (self, bootversion, &boot_loader_configs,
cancellable, error))
return FALSE;
g_autoptr(GPtrArray) deployments = g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);
g_assert (boot_loader_configs); /* Pacify static analysis */
for (guint i = 0; i < boot_loader_configs->len; i++)
{
OstreeBootconfigParser *config = boot_loader_configs->pdata[i];
/* Note this also sets self->booted_deployment */
if (!list_deployments_process_one_boot_entry (self, config, deployments,
cancellable, error))
{
g_clear_object (&self->booted_deployment);
return FALSE;
}
}
if (self->root_is_ostree_booted && !self->booted_deployment)
{
if (!glnx_fstatat_allow_noent (self->sysroot_fd, "boot/loader", NULL, AT_SYMLINK_NOFOLLOW, error))
return FALSE;
if (errno == ENOENT)
{
return glnx_throw (error, "Unexpected state: /run/ostree-booted found, but no /boot/loader directory");
}
else
{
return glnx_throw (error, "Unexpected state: /run/ostree-booted found and in / sysroot, but bootloader entry not found");
}
}
if (!_ostree_sysroot_reload_staged (self, error))
return FALSE;
/* Ensure the entries are sorted */
g_ptr_array_sort (deployments, compare_deployments_by_boot_loader_version_reversed);
/* Staged shows up first */
if (self->staged_deployment)
g_ptr_array_insert (deployments, 0, g_object_ref (self->staged_deployment));
/* And then set their index variables */
for (guint i = 0; i < deployments->len; i++)
{
OstreeDeployment *deployment = deployments->pdata[i];
ostree_deployment_set_index (deployment, i);
}
/* Determine whether we're "physical" or not, the first time we load deployments */
if (self->loadstate < OSTREE_SYSROOT_LOAD_STATE_LOADED)
{
/* If we have a booted deployment, the sysroot is / and we're definitely
* not physical.
*/
if (self->booted_deployment)
self->is_physical = FALSE; /* (the default, but explicit for clarity) */
/* Otherwise - check for /sysroot which should only exist in a deployment,
* not in ${sysroot} (a metavariable for the real physical root).
*/
else
{
if (!glnx_fstatat_allow_noent (self->sysroot_fd, "sysroot", &stbuf, 0, error))
return FALSE;
if (errno == ENOENT)
self->is_physical = TRUE;
}
/* Otherwise, the default is FALSE */
self->loadstate = OSTREE_SYSROOT_LOAD_STATE_LOADED;
}
self->bootversion = bootversion;
self->subbootversion = subbootversion;
self->deployments = g_steal_pointer (&deployments);
return TRUE;
}
/**
* ostree_sysroot_load_if_changed:
* @self: #OstreeSysroot
* @out_changed: (out caller-allocates): Whether or not the sysroot state has changed
* @cancellable: Cancellable
* @error: Error
*
* Since: 2016.4
*/
gboolean
ostree_sysroot_load_if_changed (OstreeSysroot *self,
gboolean *out_changed,
GCancellable *cancellable,
GError **error)
{
GLNX_AUTO_PREFIX_ERROR ("loading sysroot", error);
if (!ostree_sysroot_initialize (self, error))
return FALSE;
/* Here we also lazily initialize the repository. We didn't do this
* previous to v2017.6, but we do now to support the error-free
* ostree_sysroot_repo() API.
*/
if (!ensure_repo (self, error))
return FALSE;
struct stat stbuf;
if (!glnx_fstatat (self->sysroot_fd, "ostree/deploy", &stbuf, 0, error))
return FALSE;
if (self->loaded_ts.tv_sec == stbuf.st_mtim.tv_sec &&
self->loaded_ts.tv_nsec == stbuf.st_mtim.tv_nsec)
{
if (out_changed)
*out_changed = FALSE;
/* Note early return */
return TRUE;
}
g_clear_pointer (&self->deployments, g_ptr_array_unref);
g_clear_object (&self->booted_deployment);
g_clear_object (&self->staged_deployment);
self->bootversion = -1;
self->subbootversion = -1;
if (!sysroot_load_from_bootloader_configs (self, cancellable, error))
return FALSE;
self->loaded_ts = stbuf.st_mtim;
if (out_changed)
*out_changed = TRUE;
return TRUE;
}
int
ostree_sysroot_get_bootversion (OstreeSysroot *self)
{
return self->bootversion;
}
int
ostree_sysroot_get_subbootversion (OstreeSysroot *self)
{
return self->subbootversion;
}
/**
* ostree_sysroot_get_booted_deployment:
* @self: Sysroot
*
* Returns: (transfer none) (nullable): The currently booted deployment, or %NULL if none
*/
OstreeDeployment *
ostree_sysroot_get_booted_deployment (OstreeSysroot *self)
{
g_return_val_if_fail (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
return self->booted_deployment;
}
/**
* ostree_sysroot_require_booted_deployment:
* @self: Sysroot
*
* Find the booted deployment, or return an error if not booted via OSTree.
*
* Returns: (transfer none) (not nullable): The currently booted deployment, or an error
* Since: 2021.1
*/
OstreeDeployment *
ostree_sysroot_require_booted_deployment (OstreeSysroot *self, GError **error)
{
g_return_val_if_fail (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
if (!self->booted_deployment)
return glnx_null_throw (error, "Not currently booted into an OSTree system");
return self->booted_deployment;
}
/**
* ostree_sysroot_get_staged_deployment:
* @self: Sysroot
*
* Returns: (transfer none) (nullable): The currently staged deployment, or %NULL if none
*
* Since: 2018.5
*/
OstreeDeployment *
ostree_sysroot_get_staged_deployment (OstreeSysroot *self)
{
g_return_val_if_fail (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
return self->staged_deployment;
}
/**
* ostree_sysroot_get_deployments:
* @self: Sysroot
*
* Returns: (element-type OstreeDeployment) (transfer container): Ordered list of deployments
*/
GPtrArray *
ostree_sysroot_get_deployments (OstreeSysroot *self)
{
g_return_val_if_fail (self->loadstate == OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
GPtrArray *copy = g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);
for (guint i = 0; i < self->deployments->len; i++)
g_ptr_array_add (copy, g_object_ref (self->deployments->pdata[i]));
return copy;
}
/**
* ostree_sysroot_get_deployment_dirpath:
* @self: Repo
* @deployment: A deployment
*
* Note this function only returns a *relative* path - if you want
* to access it, you must either use fd-relative API such as openat(),
* or concatenate it with the full ostree_sysroot_get_path().
*
* Returns: (transfer full) (not nullable): Path to deployment root directory, relative to sysroot
*/
char *
ostree_sysroot_get_deployment_dirpath (OstreeSysroot *self,
OstreeDeployment *deployment)
{
return g_strdup_printf ("ostree/deploy/%s/deploy/%s.%d",
ostree_deployment_get_osname (deployment),
ostree_deployment_get_csum (deployment),
ostree_deployment_get_deployserial (deployment));
}
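/* Illustrative example: for stateroot "fedora", checksum "ab12cd"
* (abbreviated; real checksums are full SHA256 hex) and deployserial 0,
* this returns "ostree/deploy/fedora/deploy/ab12cd.0".
*/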
/**
* ostree_sysroot_get_deployment_directory:
* @self: Sysroot
* @deployment: A deployment
*
* Returns: (transfer full): Path to deployment root directory
*/
GFile *
ostree_sysroot_get_deployment_directory (OstreeSysroot *self,
OstreeDeployment *deployment)
{
g_autofree char *dirpath = ostree_sysroot_get_deployment_dirpath (self, deployment);
return g_file_resolve_relative_path (self->path, dirpath);
}
/**
* ostree_sysroot_get_deployment_origin_path:
* @deployment_path: A deployment path
*
* Returns: (transfer full): Path to deployment origin file
*/
GFile *
ostree_sysroot_get_deployment_origin_path (GFile *deployment_path)
{
g_autoptr(GFile) deployment_parent = g_file_get_parent (deployment_path);
return ot_gfile_resolve_path_printf (deployment_parent,
"%s.origin",
gs_file_get_path_cached (deployment_path));
}
/**
* ostree_sysroot_get_repo:
* @self: Sysroot
* @out_repo: (out) (transfer full) (optional): Repository in sysroot @self
* @cancellable: Cancellable
* @error: Error
*
* Retrieve the OSTree repository in sysroot @self. The repo is guaranteed to be open
* (see ostree_repo_open()).
*
* Returns: %TRUE on success, %FALSE otherwise
*/
gboolean
ostree_sysroot_get_repo (OstreeSysroot *self,
OstreeRepo **out_repo,
GCancellable *cancellable,
GError **error)
{
if (!ensure_repo (self, error))
return FALSE;
if (out_repo != NULL)
*out_repo = g_object_ref (self->repo);
return TRUE;
}
/**
* ostree_sysroot_repo:
* @self: Sysroot
*
* This function is a variant of ostree_sysroot_get_repo() that cannot fail, and
* returns a cached repository. Can only be called after ostree_sysroot_initialize()
* or ostree_sysroot_load() has been invoked successfully.
*
* Returns: (transfer none) (not nullable): The OSTree repository in sysroot @self.
*
* Since: 2017.7
*/
OstreeRepo *
ostree_sysroot_repo (OstreeSysroot *self)
{
g_return_val_if_fail (self->loadstate >= OSTREE_SYSROOT_LOAD_STATE_LOADED, NULL);
g_assert (self->repo);
return self->repo;
}
static OstreeBootloader*
_ostree_sysroot_new_bootloader_by_type (
OstreeSysroot *sysroot,
OstreeCfgSysrootBootloaderOpt bl_type)
{
switch (bl_type)
{
case CFG_SYSROOT_BOOTLOADER_OPT_NONE:
/* No bootloader specified; do not query bootloaders to run. */
return NULL;
case CFG_SYSROOT_BOOTLOADER_OPT_GRUB2:
return (OstreeBootloader*) _ostree_bootloader_grub2_new (sysroot);
case CFG_SYSROOT_BOOTLOADER_OPT_SYSLINUX:
return (OstreeBootloader*) _ostree_bootloader_syslinux_new (sysroot);
case CFG_SYSROOT_BOOTLOADER_OPT_UBOOT:
return (OstreeBootloader*) _ostree_bootloader_uboot_new (sysroot);
case CFG_SYSROOT_BOOTLOADER_OPT_ZIPL:
/* We never consider zipl as active by default, so it can only be created
* if it's explicitly requested in the config */
return (OstreeBootloader*) _ostree_bootloader_zipl_new (sysroot);
case CFG_SYSROOT_BOOTLOADER_OPT_AUTO:
/* "auto" is handled by ostree_sysroot_query_bootloader so we should
* never get here: Fallthrough */
default:
g_assert_not_reached ();
}
}
/**
* _ostree_sysroot_query_bootloader:
* @sysroot: Sysroot
* @out_bootloader: (out) (transfer full) (optional) (nullable): Return location for bootloader, may be %NULL
* @cancellable: Cancellable
* @error: Error
*/
gboolean
_ostree_sysroot_query_bootloader (OstreeSysroot *sysroot,
OstreeBootloader **out_bootloader,
GCancellable *cancellable,
GError **error)
{
OstreeRepo *repo = ostree_sysroot_repo (sysroot);
OstreeCfgSysrootBootloaderOpt bootloader_config = repo->bootloader;
g_debug ("Using bootloader configuration: %s",
CFG_SYSROOT_BOOTLOADER_OPTS_STR[bootloader_config]);
g_autoptr(OstreeBootloader) ret_loader = NULL;
if (bootloader_config == CFG_SYSROOT_BOOTLOADER_OPT_AUTO)
{
OstreeCfgSysrootBootloaderOpt probe[] = {
CFG_SYSROOT_BOOTLOADER_OPT_SYSLINUX,
CFG_SYSROOT_BOOTLOADER_OPT_GRUB2,
CFG_SYSROOT_BOOTLOADER_OPT_UBOOT,
};
for (int i = 0; i < G_N_ELEMENTS (probe); i++)
{
g_autoptr(OstreeBootloader) bl = _ostree_sysroot_new_bootloader_by_type (
sysroot, probe[i]);
gboolean is_active = FALSE;
if (!_ostree_bootloader_query (bl, &is_active, cancellable, error))
return FALSE;
if (is_active)
{
ret_loader = g_steal_pointer (&bl);
break;
}
}
}
else
ret_loader = _ostree_sysroot_new_bootloader_by_type (sysroot, bootloader_config);
ot_transfer_out_value (out_bootloader, &ret_loader);
return TRUE;
}
char *
_ostree_sysroot_join_lines (GPtrArray *lines)
{
GString *buf = g_string_new ("");
gboolean prev_was_empty = FALSE;
for (guint i = 0; i < lines->len; i++)
{
const char *line = lines->pdata[i];
/* Special bit to remove extraneous empty lines */
if (*line == '\0')
{
if (prev_was_empty || i == 0)
continue;
else
prev_was_empty = TRUE;
}
else
prev_was_empty = FALSE; /* Reset so blank lines after later content are kept */
g_string_append (buf, line);
g_string_append_c (buf, '\n');
}
return g_string_free (buf, FALSE);
}
/**
* ostree_sysroot_query_deployments_for:
* @self: Sysroot
* @osname: (allow-none): "stateroot" name
* @out_pending: (out) (nullable) (optional) (transfer full): The pending deployment
* @out_rollback: (out) (nullable) (optional) (transfer full): The rollback deployment
*
* Find the pending and rollback deployments for @osname. Pass %NULL for @osname
* to use the booted deployment's osname. By default, pending deployment is the
* first deployment in the order that matches @osname, and @rollback will be the
* next one after the booted deployment, or the deployment after the pending if
* we're not looking at the booted deployment.
*
* Since: 2017.7
*/
void
ostree_sysroot_query_deployments_for (OstreeSysroot *self,
const char *osname,
OstreeDeployment **out_pending,
OstreeDeployment **out_rollback)
{
g_return_if_fail (osname != NULL || self->booted_deployment != NULL);
g_autoptr(OstreeDeployment) ret_pending = NULL;
g_autoptr(OstreeDeployment) ret_rollback = NULL;
if (osname == NULL)
osname = ostree_deployment_get_osname (self->booted_deployment);
gboolean found_booted = FALSE;
for (guint i = 0; i < self->deployments->len; i++)
{
OstreeDeployment *deployment = self->deployments->pdata[i];
/* Ignore deployments not for this osname */
if (strcmp (ostree_deployment_get_osname (deployment), osname) != 0)
continue;
/* Is this deployment booted? If so, note we're past the booted */
if (self->booted_deployment != NULL &&
ostree_deployment_equal (deployment, self->booted_deployment))
{
found_booted = TRUE;
continue;
}
if (!found_booted && !ret_pending)
ret_pending = g_object_ref (deployment);
else if (found_booted && !ret_rollback)
ret_rollback = g_object_ref (deployment);
}
if (out_pending)
*out_pending = g_steal_pointer (&ret_pending);
if (out_rollback)
*out_rollback = g_steal_pointer (&ret_rollback);
}
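/* A small usage sketch (illustrative): find the pending deployment for the
* booted stateroot. Passing NULL for @osname assumes a booted deployment;
* either out-parameter may be NULL if not needed.
*
*   g_autoptr(OstreeDeployment) pending = NULL;
*   ostree_sysroot_query_deployments_for (sysroot, NULL, &pending, NULL);
*/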
/**
* ostree_sysroot_get_merge_deployment:
* @self: Sysroot
* @osname: (allow-none): Operating system group
*
* Find the deployment to use as a configuration merge source; this is
* the first one in the current deployment list which matches osname.
*
* Returns: (transfer full) (nullable): Configuration merge deployment
*/
OstreeDeployment *
ostree_sysroot_get_merge_deployment (OstreeSysroot *self,
const char *osname)
{
g_return_val_if_fail (osname != NULL || self->booted_deployment != NULL, NULL);
if (osname == NULL)
osname = ostree_deployment_get_osname (self->booted_deployment);
/* If we're booted into the OS into which we're deploying, then
* merge the currently *booted* configuration, rather than the most
* recently deployed.
*/
if (self->booted_deployment &&
g_strcmp0 (ostree_deployment_get_osname (self->booted_deployment), osname) == 0)
return g_object_ref (self->booted_deployment);
else
{
g_autoptr(OstreeDeployment) pending = NULL;
ostree_sysroot_query_deployments_for (self, osname, &pending, NULL);
return g_steal_pointer (&pending);
}
}
/**
* ostree_sysroot_origin_new_from_refspec:
* @self: Sysroot
* @refspec: A refspec
*
* Returns: (transfer full) (not nullable): A new config file which sets @refspec as an origin
*/
GKeyFile *
ostree_sysroot_origin_new_from_refspec (OstreeSysroot *self,
const char *refspec)
{
GKeyFile *ret = g_key_file_new ();
g_key_file_set_string (ret, "origin", "refspec", refspec);
return ret;
}
/**
* ostree_sysroot_lock:
* @self: Self
* @error: Error
*
* Acquire an exclusive multi-process write lock for @self. This call
* blocks until the lock has been acquired. The lock is not
* reentrant.
*
* Release the lock with ostree_sysroot_unlock(). The lock will also
* be released if @self is deallocated.
*/
gboolean
ostree_sysroot_lock (OstreeSysroot *self,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
if (!_ostree_sysroot_ensure_writable (self, error))
return FALSE;
return glnx_make_lock_file (self->sysroot_fd, OSTREE_SYSROOT_LOCKFILE,
LOCK_EX, &self->lock, error);
}
/**
* ostree_sysroot_try_lock:
* @self: Self
* @out_acquired: (out): Whether or not the lock has been acquired
* @error: Error
*
* Try to acquire an exclusive multi-process write lock for @self. If
* another process holds the lock, this function will return
* immediately, setting @out_acquired to %FALSE, and returning %TRUE
* (and no error).
*
* Release the lock with ostree_sysroot_unlock(). The lock will also
* be released if @self is deallocated.
*/
gboolean
ostree_sysroot_try_lock (OstreeSysroot *self,
gboolean *out_acquired,
GError **error)
{
if (!ensure_sysroot_fd (self, error))
return FALSE;
if (!_ostree_sysroot_ensure_writable (self, error))
return FALSE;
/* Note use of LOCK_NB */
g_autoptr(GError) local_error = NULL;
if (!glnx_make_lock_file (self->sysroot_fd, OSTREE_SYSROOT_LOCKFILE,
LOCK_EX | LOCK_NB, &self->lock, &local_error))
{
if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK))
{
*out_acquired = FALSE;
}
else
{
g_propagate_error (error, g_steal_pointer (&local_error));
return FALSE;
}
}
else
{
*out_acquired = TRUE;
}
return TRUE;
}
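/* A minimal usage sketch (illustrative): attempt a non-blocking lock and
* back off gracefully if another process holds it. `sysroot` and `error`
* are assumed to be in scope.
*
*   gboolean acquired = FALSE;
*   if (!ostree_sysroot_try_lock (sysroot, &acquired, error))
*     return FALSE;
*   if (!acquired)
*     g_print ("sysroot is locked by another process\n");
*/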
/**
* ostree_sysroot_unlock:
* @self: Self
*
* Clear the lock previously acquired with ostree_sysroot_lock(). It
* is safe to call this function if the lock has not been previously
* acquired.
*/
void
ostree_sysroot_unlock (OstreeSysroot *self)
{
glnx_release_lock_file (&self->lock);
}
static void
lock_in_thread (GTask *task,
gpointer source,
gpointer task_data,
GCancellable *cancellable)
{
GError *local_error = NULL;
OstreeSysroot *self = source;
if (!ostree_sysroot_lock (self, &local_error))
goto out;
if (g_cancellable_set_error_if_cancelled (cancellable, &local_error))
ostree_sysroot_unlock (self);
out:
if (local_error)
g_task_return_error (task, local_error);
else
g_task_return_boolean (task, TRUE);
}
/**
* ostree_sysroot_lock_async:
* @self: Self
* @cancellable: Cancellable
* @callback: Callback
* @user_data: User data
*
* An asynchronous version of ostree_sysroot_lock().
*/
void
ostree_sysroot_lock_async (OstreeSysroot *self,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data)
{
g_autoptr(GTask) task = g_task_new (self, cancellable, callback, user_data);
g_task_run_in_thread (task, lock_in_thread);
}
/**
* ostree_sysroot_lock_finish:
* @self: Self
* @result: Result
* @error: Error
*
* Call when ostree_sysroot_lock_async() is ready.
*/
gboolean
ostree_sysroot_lock_finish (OstreeSysroot *self,
GAsyncResult *result,
GError **error)
{
g_return_val_if_fail (g_task_is_valid (result, self), FALSE);
return g_task_propagate_boolean ((GTask*)result, error);
}
/**
* ostree_sysroot_init_osname:
* @self: Sysroot
* @osname: Name group of operating system checkouts
* @cancellable: Cancellable
* @error: Error
*
* Initialize the directory structure for an "osname", which is a
* group of operating system deployments, with a shared `/var`. One
* is required for generating a deployment.
*
* Since: 2016.4
*/
gboolean
ostree_sysroot_init_osname (OstreeSysroot *self,
const char *osname,
GCancellable *cancellable,
GError **error)
{
if (!_ostree_sysroot_ensure_writable (self, error))
return FALSE;
const char *deploydir = glnx_strjoina ("ostree/deploy/", osname);
if (mkdirat (self->sysroot_fd, deploydir, 0777) < 0)
return glnx_throw_errno_prefix (error, "Creating %s", deploydir);
glnx_autofd int dfd = -1;
if (!glnx_opendirat (self->sysroot_fd, deploydir, TRUE, &dfd, error))
return FALSE;
if (mkdirat (dfd, "var", 0777) < 0)
return glnx_throw_errno_prefix (error, "Creating %s", "var");
/* This is a bit of a legacy hack...but we have to keep it around
* now. We're ensuring core subdirectories of /var exist.
*/
if (mkdirat (dfd, "var/tmp", 0777) < 0)
return glnx_throw_errno_prefix (error, "Creating %s", "var/tmp");
if (fchmodat (dfd, "var/tmp", 01777, 0) < 0)
return glnx_throw_errno_prefix (error, "fchmod %s", "var/tmp");
if (mkdirat (dfd, "var/lib", 0777) < 0)
return glnx_throw_errno_prefix (error, "Creating %s", "var/lib");
/* This needs to be available and properly labeled early during the boot
* process (before tmpfiles.d kicks in), so that journald can flush logs from
* the first boot there. https://bugzilla.redhat.com/show_bug.cgi?id=1265295
 */
if (mkdirat (dfd, "var/log", 0755) < 0)
return glnx_throw_errno_prefix (error, "Creating %s", "var/log");
if (symlinkat ("../run", dfd, "var/run") < 0)
return glnx_throw_errno_prefix (error, "Symlinking %s", "var/run");
if (symlinkat ("../run/lock", dfd, "var/lock") < 0)
return glnx_throw_errno_prefix (error, "Symlinking %s", "var/lock");
if (!_ostree_sysroot_bump_mtime (self, error))
return FALSE;
return TRUE;
}
/**
* ostree_sysroot_simple_write_deployment:
* @sysroot: Sysroot
* @osname: (allow-none): OS name
* @new_deployment: Prepend this deployment to the list
* @merge_deployment: (allow-none): Use this deployment for configuration merge
* @flags: Flags controlling behavior
* @cancellable: Cancellable
* @error: Error
*
* Prepend @new_deployment to the list of deployments, commit, and
* cleanup. By default, all other deployments for the given @osname
* except the merge deployment and the booted deployment will be
* garbage collected.
*
* If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN is
* specified, then all current deployments will be kept.
*
* If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN_PENDING is
* specified, then pending deployments will be kept.
*
* If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN_ROLLBACK is
* specified, then rollback deployments will be kept.
*
 * If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT is
 * specified, then instead of being prepended (made the default), the new
 * deployment will be added right after the booted or merge deployment.
*
* If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NO_CLEAN is
* specified, then no cleanup will be performed after adding the
* deployment. Make sure to call ostree_sysroot_cleanup() sometime
* later, instead.
*/
gboolean
ostree_sysroot_simple_write_deployment (OstreeSysroot *sysroot,
const char *osname,
OstreeDeployment *new_deployment,
OstreeDeployment *merge_deployment,
OstreeSysrootSimpleWriteDeploymentFlags flags,
GCancellable *cancellable,
GError **error)
{
const gboolean postclean =
(flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NO_CLEAN) == 0;
  const gboolean make_default =
    (flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT) == 0;
const gboolean retain_pending =
(flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN_PENDING) > 0;
const gboolean retain_rollback =
(flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN_ROLLBACK) > 0;
gboolean retain =
(flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN) > 0;
g_autoptr(GPtrArray) deployments = ostree_sysroot_get_deployments (sysroot);
OstreeDeployment *booted_deployment = ostree_sysroot_get_booted_deployment (sysroot);
if (osname == NULL && booted_deployment)
osname = ostree_deployment_get_osname (booted_deployment);
gboolean added_new = FALSE;
g_autoptr(GPtrArray) new_deployments = g_ptr_array_new_with_free_func (g_object_unref);
if (make_default)
{
g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
added_new = TRUE;
}
/* without a booted and a merge deployment, retain_pending/rollback become meaningless;
* let's just retain all deployments in that case */
if (!booted_deployment && !merge_deployment && (retain_pending || retain_rollback))
retain = TRUE;
/* tracks when we come across the booted deployment */
gboolean before_booted = TRUE;
gboolean before_merge = TRUE;
for (guint i = 0; i < deployments->len; i++)
{
OstreeDeployment *deployment = deployments->pdata[i];
const gboolean osname_matches =
(osname == NULL || g_str_equal (ostree_deployment_get_osname (deployment), osname));
const gboolean is_booted = ostree_deployment_equal (deployment, booted_deployment);
const gboolean is_merge = ostree_deployment_equal (deployment, merge_deployment);
if (is_booted)
before_booted = FALSE;
if (is_merge)
before_merge = FALSE;
/* use the booted deployment as the "crossover" point between pending and rollback
* deployments, fall back on merge deployment */
const gboolean passed_crossover = booted_deployment ? !before_booted : !before_merge;
/* Retain deployment if:
* - we're explicitly asked to, or
   *   - it's pinned, or
* - the deployment is for another osname, or
* - we're keeping pending deployments and this is a pending deployment, or
* - this is the merge or boot deployment, or
* - we're keeping rollback deployments and this is a rollback deployment
*/
if (retain
|| ostree_deployment_is_pinned (deployment)
|| !osname_matches
|| (retain_pending && !passed_crossover)
|| (is_booted || is_merge)
|| (retain_rollback && passed_crossover))
g_ptr_array_add (new_deployments, g_object_ref (deployment));
/* add right after booted/merge deployment */
if (!added_new && passed_crossover)
{
g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
added_new = TRUE;
}
}
/* add it last if no crossover defined (or it's the first deployment in the sysroot) */
if (!added_new)
g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
OstreeSysrootWriteDeploymentsOpts write_opts = { .do_postclean = postclean };
if (!ostree_sysroot_write_deployments_with_options (sysroot, new_deployments, &write_opts,
cancellable, error))
return FALSE;
return TRUE;
}
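/* Illustrative call (not from the original source): write an upgrade as the
 * new default deployment but defer cleanup to a later explicit call:
 *
 *   if (!ostree_sysroot_simple_write_deployment (sysroot, osname,
 *                                                new_deployment, merge_deployment,
 *                                                OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NO_CLEAN,
 *                                                cancellable, error))
 *     return FALSE;
 *   ...
 *   if (!ostree_sysroot_cleanup (sysroot, cancellable, error))
 *     return FALSE;
 */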
/* Deploy a copy of @target_deployment */
static gboolean
clone_deployment (OstreeSysroot *sysroot,
OstreeDeployment *target_deployment,
OstreeDeployment *merge_deployment,
GCancellable *cancellable,
GError **error)
{
/* Ensure we have a clean slate */
if (!ostree_sysroot_prepare_cleanup (sysroot, cancellable, error))
return glnx_prefix_error (error, "Performing initial cleanup");
/* Copy the bootloader config options */
OstreeBootconfigParser *bootconfig = ostree_deployment_get_bootconfig (merge_deployment);
g_auto(GStrv) previous_args = g_strsplit (ostree_bootconfig_parser_get (bootconfig, "options"), " ", -1);
g_autoptr(OstreeKernelArgs) kargs = ostree_kernel_args_new ();
ostree_kernel_args_append_argv (kargs, previous_args);
/* Deploy the copy */
g_autoptr(OstreeDeployment) new_deployment = NULL;
g_auto(GStrv) kargs_strv = ostree_kernel_args_to_strv (kargs);
if (!ostree_sysroot_deploy_tree (sysroot,
ostree_deployment_get_osname (target_deployment),
ostree_deployment_get_csum (target_deployment),
ostree_deployment_get_origin (target_deployment),
merge_deployment, kargs_strv, &new_deployment,
cancellable, error))
return FALSE;
/* Hotfixes push the deployment as rollback target, so it shouldn't
* be the default.
*/
if (!ostree_sysroot_simple_write_deployment (sysroot, ostree_deployment_get_osname (target_deployment),
new_deployment, merge_deployment,
OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT,
cancellable, error))
return FALSE;
return TRUE;
}
/* Do `mkdir()` followed by `chmod()` immediately afterwards to ensure `umask()` isn't
* masking permissions where we don't want it to. Thus we avoid calling `umask()`, which
* would affect the whole process. */
static gboolean mkdir_unmasked (int dfd,
const char *path,
int mode,
GCancellable *cancellable,
GError **error)
{
if (!glnx_shutil_mkdir_p_at (dfd, path, mode, cancellable, error))
return FALSE;
if (fchmodat (dfd, path, mode, 0) < 0)
return glnx_throw_errno_prefix (error, "chmod(%s)", path);
return TRUE;
}
/**
* ostree_sysroot_deployment_unlock:
* @self: Sysroot
* @deployment: Deployment
* @unlocked_state: Transition to this unlocked state
* @cancellable: Cancellable
* @error: Error
*
* Configure the target deployment @deployment such that it
* is writable. There are multiple modes, essentially differing
* in whether or not any changes persist across reboot.
*
* The `OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX` state is persistent
* across reboots.
*
* Since: 2016.4
*/
gboolean
ostree_sysroot_deployment_unlock (OstreeSysroot *self,
OstreeDeployment *deployment,
OstreeDeploymentUnlockedState unlocked_state,
GCancellable *cancellable,
GError **error)
{
/* This function cannot re-lock */
g_return_val_if_fail (unlocked_state != OSTREE_DEPLOYMENT_UNLOCKED_NONE, FALSE);
OstreeDeploymentUnlockedState current_unlocked = ostree_deployment_get_unlocked (deployment);
if (current_unlocked != OSTREE_DEPLOYMENT_UNLOCKED_NONE)
return glnx_throw (error, "Deployment is already in unlocked state: %s",
ostree_deployment_unlocked_state_to_string (current_unlocked));
g_autoptr(OstreeDeployment) merge_deployment =
ostree_sysroot_get_merge_deployment (self, ostree_deployment_get_osname (deployment));
if (!merge_deployment)
return glnx_throw (error, "No previous deployment to duplicate");
/* For hotfixes, we push a rollback target */
if (unlocked_state == OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX)
{
if (!clone_deployment (self, deployment, merge_deployment, cancellable, error))
return FALSE;
}
/* Crack it open */
if (!ostree_sysroot_deployment_set_mutable (self, deployment, TRUE,
cancellable, error))
return FALSE;
g_autofree char *deployment_path = ostree_sysroot_get_deployment_dirpath (self, deployment);
glnx_autofd int deployment_dfd = -1;
if (!glnx_opendirat (self->sysroot_fd, deployment_path, TRUE, &deployment_dfd, error))
return FALSE;
g_autoptr(OstreeSePolicy) sepolicy = ostree_sepolicy_new_at (deployment_dfd, cancellable, error);
if (!sepolicy)
return FALSE;
/* we want our /usr overlay to have the same permission bits as the one we'll shadow */
mode_t usr_mode;
{ struct stat stbuf;
if (!glnx_fstatat (deployment_dfd, "usr", &stbuf, 0, error))
return FALSE;
usr_mode = stbuf.st_mode;
}
const char *ovl_options = NULL;
static const char hotfix_ovl_options[] = "lowerdir=usr,upperdir=.usr-ovl-upper,workdir=.usr-ovl-work";
g_autofree char *unlock_ovldir = NULL;
switch (unlocked_state)
{
case OSTREE_DEPLOYMENT_UNLOCKED_NONE:
g_assert_not_reached ();
break;
case OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX:
{
/* Create the overlayfs directories in the deployment root
* directly for hotfixes. The ostree-prepare-root.c helper
* is also set up to detect and mount these.
*/
if (!mkdir_unmasked (deployment_dfd, ".usr-ovl-upper", usr_mode, cancellable, error))
return FALSE;
if (!mkdir_unmasked (deployment_dfd, ".usr-ovl-work", usr_mode, cancellable, error))
return FALSE;
ovl_options = hotfix_ovl_options;
}
break;
case OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT:
case OSTREE_DEPLOYMENT_UNLOCKED_TRANSIENT:
{
unlock_ovldir = g_strdup ("/var/tmp/ostree-unlock-ovl.XXXXXX");
/* We're just doing transient development/hacking? Okay,
* stick the overlayfs bits in /var/tmp.
*/
const char *development_ovl_upper;
const char *development_ovl_work;
/* Ensure that the directory is created with the same label as `/usr` */
{ g_auto(OstreeSepolicyFsCreatecon) con = { 0, };
if (!_ostree_sepolicy_preparefscreatecon (&con, sepolicy,
"/usr", usr_mode, error))
return FALSE;
if (g_mkdtemp_full (unlock_ovldir, 0755) == NULL)
return glnx_throw_errno_prefix (error, "mkdtemp");
}
development_ovl_upper = glnx_strjoina (unlock_ovldir, "/upper");
if (!mkdir_unmasked (AT_FDCWD, development_ovl_upper, usr_mode, cancellable, error))
return FALSE;
development_ovl_work = glnx_strjoina (unlock_ovldir, "/work");
if (!mkdir_unmasked (AT_FDCWD, development_ovl_work, usr_mode, cancellable, error))
return FALSE;
ovl_options = glnx_strjoina ("lowerdir=usr,upperdir=", development_ovl_upper,
",workdir=", development_ovl_work);
}
}
g_assert (ovl_options != NULL);
  /* Here we run `mount()` in a fork()ed child because we need to use
   * `chdir()` so that the mount path options passed to overlayfs can
   * stay relative and readable.
*
* We can't `chdir()` inside a shared library since there may be
* threads, etc.
*/
{
pid_t mount_child = fork ();
if (mount_child < 0)
return glnx_throw_errno_prefix (error, "fork");
else if (mount_child == 0)
{
int mountflags = 0;
if (unlocked_state == OSTREE_DEPLOYMENT_UNLOCKED_TRANSIENT)
mountflags |= MS_RDONLY;
/* Child process. Do NOT use any GLib API here; it's not generally fork() safe.
*
* TODO: report errors across a pipe (or use the journal?) rather than
* spewing to stderr.
*/
if (fchdir (deployment_dfd) < 0)
err (1, "fchdir");
if (mount ("overlay", "/usr", "overlay", mountflags, ovl_options) < 0)
err (1, "mount");
exit (EXIT_SUCCESS);
}
else
{
/* Parent */
int estatus;
if (TEMP_FAILURE_RETRY (waitpid (mount_child, &estatus, 0)) < 0)
return glnx_throw_errno_prefix (error, "waitpid() on mount helper");
if (!g_spawn_check_exit_status (estatus, error))
return glnx_prefix_error (error, "Failed overlayfs mount");
}
}
g_autoptr(OstreeDeployment) deployment_clone = ostree_deployment_clone (deployment);
GKeyFile *origin_clone = ostree_deployment_get_origin (deployment_clone);
/* Now, write out the flag saying what we did */
switch (unlocked_state)
{
case OSTREE_DEPLOYMENT_UNLOCKED_NONE:
g_assert_not_reached ();
break;
case OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX:
g_key_file_set_string (origin_clone, "origin", "unlocked",
ostree_deployment_unlocked_state_to_string (unlocked_state));
if (!ostree_sysroot_write_origin_file (self, deployment, origin_clone,
cancellable, error))
return FALSE;
break;
case OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT:
case OSTREE_DEPLOYMENT_UNLOCKED_TRANSIENT:
{
g_autofree char *devpath =
unlocked_state == OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT ?
_ostree_sysroot_get_runstate_path (deployment, _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_FLAG_DEVELOPMENT)
:
_ostree_sysroot_get_runstate_path (deployment, _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_FLAG_TRANSIENT);
g_autofree char *devpath_parent = dirname (g_strdup (devpath));
if (!glnx_shutil_mkdir_p_at (AT_FDCWD, devpath_parent, 0755, cancellable, error))
return FALSE;
if (!g_file_set_contents (devpath, unlock_ovldir, -1, error))
return FALSE;
}
}
/* For hotfixes we already pushed a rollback which will bump the
* mtime, but we need to bump it again so that clients get the state
* change for this deployment. For development we need to do this
* regardless.
*/
if (!_ostree_sysroot_bump_mtime (self, error))
return FALSE;
return TRUE;
}
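/* Illustrative usage (not from the original source); this is roughly what
 * `ostree admin unlock` performs for a development overlay:
 *
 *   if (!ostree_sysroot_deployment_unlock (sysroot, booted_deployment,
 *                                          OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT,
 *                                          cancellable, error))
 *     return FALSE;
 */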
/**
* ostree_sysroot_deployment_set_pinned:
* @self: Sysroot
* @deployment: A deployment
 * @is_pinned: If %TRUE, the deployment will not be subject to automatic GC
* @error: Error
*
* By default, deployments may be subject to garbage collection. Typical uses of
* libostree only retain at most 2 deployments. If @is_pinned is `TRUE`, a
* metadata bit will be set causing libostree to avoid automatic GC of the
* deployment. However, this is really an "advisory" note; it's still possible
* for e.g. older versions of libostree unaware of pinning to GC the deployment.
*
* This function does nothing and returns successfully if the deployment
* is already in the desired pinning state. It is an error to try to pin
* the staged deployment (as it's not in the bootloader entries).
*
* Since: 2018.3
*/
gboolean
ostree_sysroot_deployment_set_pinned (OstreeSysroot *self,
OstreeDeployment *deployment,
gboolean is_pinned,
GError **error)
{
const gboolean current_pin = ostree_deployment_is_pinned (deployment);
if (is_pinned == current_pin)
return TRUE;
if (ostree_deployment_is_staged (deployment))
return glnx_throw (error, "Cannot pin staged deployment");
g_autoptr(OstreeDeployment) deployment_clone = ostree_deployment_clone (deployment);
GKeyFile *origin_clone = ostree_deployment_get_origin (deployment_clone);
if (is_pinned)
g_key_file_set_boolean (origin_clone, OSTREE_ORIGIN_TRANSIENT_GROUP, "pinned", TRUE);
else
g_key_file_remove_key (origin_clone, OSTREE_ORIGIN_TRANSIENT_GROUP, "pinned", NULL);
if (!ostree_sysroot_write_origin_file (self, deployment, origin_clone, NULL, error))
return FALSE;
return TRUE;
}
| 1 | 19,638 | In this and the one below, should we assert `self != NULL` first? | ostreedev-ostree | c |
@@ -147,7 +147,7 @@ class GalleryControllerTest extends \PHPUnit_Framework_TestCase
$view = $galleryController->postGalleryMediaGalleryhasmediaAction(1, 2, new Request());
$this->assertInstanceOf('FOS\RestBundle\View\View', $view);
- $this->assertSame(200, $view->getStatusCode(), 'Should return 200');
+ $this->assertSame(200, $view->getResponse()->getStatusCode(), 'Should return 200');
}
public function testPostGalleryMediaGalleryhasmediaInvalidAction() | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <thomas.rabaix@sonata-project.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Tests\Controller\Api;
use Doctrine\Common\Collections\ArrayCollection;
use Sonata\MediaBundle\Controller\Api\GalleryController;
use Sonata\MediaBundle\Model\GalleryHasMedia;
use Symfony\Component\HttpFoundation\Request;
class GalleryTest extends GalleryHasMedia
{
}
/**
* Class GalleryControllerTest.
 *
* @author Hugo Briand <briand@ekino.com>
*/
class GalleryControllerTest extends \PHPUnit_Framework_TestCase
{
public function testGetGalleriesAction()
{
$gManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$gManager->expects($this->once())->method('getPager')->will($this->returnValue(array()));
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$params = $this->getMock('FOS\RestBundle\Request\ParamFetcherInterface');
$params->expects($this->once())->method('all')->will($this->returnValue(array('page' => 1, 'count' => 10, 'orderBy' => array('id' => 'ASC'))));
$params->expects($this->exactly(3))->method('get');
$this->assertSame(array(), $gController->getGalleriesAction($params));
}
public function testGetGalleryAction()
{
$gManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame($gallery, $gController->getGalleryAction(1));
}
/**
* @expectedException \Symfony\Component\HttpKernel\Exception\NotFoundHttpException
* @expectedExceptionMessage Gallery (42) not found
*/
public function testGetGalleryNotFoundAction()
{
$gManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$gManager->expects($this->once())->method('findOneBy');
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$gController->getGalleryAction(42);
}
public function testGetGalleryGalleryhasmediasAction()
{
$gManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame(array($galleryHasMedia), $gController->getGalleryGalleryhasmediasAction(1));
}
public function testGetGalleryMediaAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$gManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame(array($media), $gController->getGalleryMediasAction(1));
}
public function testPostGalleryMediaGalleryhasmediaAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media2 = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media2->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media2));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->getMockBuilder('Symfony\Component\Form\Form')->disableOriginalConstructor()->getMock();
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(true));
$form->expects($this->once())->method('getData')->will($this->returnValue($galleryHasMedia));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->postGalleryMediaGalleryhasmediaAction(1, 2, new Request());
$this->assertInstanceOf('FOS\RestBundle\View\View', $view);
$this->assertSame(200, $view->getStatusCode(), 'Should return 200');
}
public function testPostGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->postGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf('FOS\RestBundle\View\View', $view);
$this->assertSame(400, $view->getStatusCode(), 'Should return 400');
}
public function testPutGalleryMediaGalleryhasmediaAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->getMockBuilder('Symfony\Component\Form\Form')->disableOriginalConstructor()->getMock();
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(true));
$form->expects($this->once())->method('getData')->will($this->returnValue($galleryHasMedia));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->putGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf('FOS\RestBundle\View\View', $view);
$this->assertSame(200, $view->getStatusCode(), 'Should return 200');
}
public function testPutGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue(array($galleryHasMedia)));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->getMockBuilder('Symfony\Component\Form\Form')->disableOriginalConstructor()->getMock();
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(false));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->putGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf('Symfony\Component\Form\FormInterface', $view);
}
public function testDeleteGalleryMediaGalleryhasmediaAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->any())->method('getGalleryHasMedias')->will($this->returnValue(new ArrayCollection(array($galleryHasMedia))));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->deleteGalleryMediaGalleryhasmediaAction(1, 1);
$this->assertSame(array('deleted' => true), $view);
}
public function testDeleteGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media2 = $this->getMock('Sonata\MediaBundle\Model\MediaInterface');
$media2->expects($this->any())->method('getId')->will($this->returnValue(2));
$galleryHasMedia = $this->getMock('Sonata\MediaBundle\Model\GalleryHasMediaInterface');
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media2));
$gallery = $this->getMock('Sonata\MediaBundle\Model\GalleryInterface');
$gallery->expects($this->any())->method('getGalleryHasMedias')->will($this->returnValue(new ArrayCollection(array($galleryHasMedia))));
$galleryManager = $this->getMock('Sonata\MediaBundle\Model\GalleryManagerInterface');
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->getMock('Sonata\MediaBundle\Model\MediaManagerInterface');
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->getMock('Symfony\Component\Form\FormFactoryInterface');
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, 'Sonata\MediaBundle\Tests\Controller\Api\GalleryTest');
$view = $galleryController->deleteGalleryMediaGalleryhasmediaAction(1, 1);
$this->assertInstanceOf('FOS\RestBundle\View\View', $view);
$this->assertSame(400, $view->getStatusCode(), 'Should return 400');
}
}
 | 1 | 8,321 | Why is this needed? | sonata-project-SonataMediaBundle | php |
@@ -149,6 +149,12 @@ class PackageEntry:
"""
self._meta['user_meta'] = meta
+ def get_meta(self):
+ """
+ Gets the user_meta for this PackageEntry.
+ """
+ return self.meta or None
+
def _verify_hash(self, read_bytes):
"""
Verifies hash of bytes | 1 | from collections import deque
import gc
import hashlib
import io
import json
import pathlib
import os
import shutil
import time
from multiprocessing import Pool
import uuid
import warnings
import jsonlines
from tqdm import tqdm
from .data_transfer import (
calculate_sha256, copy_file, copy_file_list, get_bytes, get_size_and_version,
list_object_versions, list_url, put_bytes
)
from .exceptions import PackageException
from .formats import FormatRegistry
from .telemetry import ApiTelemetry
from .util import (
QuiltException, fix_url, get_from_config, get_install_location,
validate_package_name, quiltignore_filter, validate_key, extract_file_extension,
parse_sub_package_name)
from .util import CACHE_PATH, TEMPFILE_DIR_PATH as APP_DIR_TEMPFILE_DIR, PhysicalKey, \
user_is_configured_to_custom_stack, catalog_package_url, DISABLE_TQDM
def hash_file(readable_file):
""" Returns SHA256 hash of readable file-like object """
buf = readable_file.read(4096)
hasher = hashlib.sha256()
while buf:
hasher.update(buf)
buf = readable_file.read(4096)
return hasher.hexdigest()
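# Quick illustration (sketch, not part of the original module):
#
#   >>> import io, hashlib
#   >>> hash_file(io.BytesIO(b"hello")) == hashlib.sha256(b"hello").hexdigest()
#   True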
def _delete_local_physical_key(pk):
assert pk.is_local(), "This function only works on files that live on a local disk"
pathlib.Path(pk.path).unlink()
def _filesystem_safe_encode(key):
"""Returns the sha256 of the key. This ensures there are no slashes, uppercase/lowercase conflicts,
avoids `OSError: [Errno 36] File name too long:`, etc."""
return hashlib.sha256(key.encode()).hexdigest()
class ObjectPathCache:
@classmethod
def _cache_path(cls, url):
url_hash = _filesystem_safe_encode(url)
return CACHE_PATH / url_hash[0:2] / url_hash[2:]
@classmethod
def get(cls, url):
cache_path = cls._cache_path(url)
try:
with open(cache_path) as fd:
path, dev, ino, mtime = json.load(fd)
except (FileNotFoundError, ValueError):
return None
try:
stat = pathlib.Path(path).stat()
except FileNotFoundError:
return None
# check if device, file, and timestamp are unchanged => cache hit
# see also https://docs.python.org/3/library/os.html#os.stat_result
if stat.st_dev == dev and stat.st_ino == ino and stat.st_mtime_ns == mtime:
return path
else:
return None
@classmethod
def set(cls, url, path):
stat = pathlib.Path(path).stat()
cache_path = cls._cache_path(url)
cache_path.parent.mkdir(parents=True, exist_ok=True)
with open(cache_path, 'w') as fd:
json.dump([path, stat.st_dev, stat.st_ino, stat.st_mtime_ns], fd)
@classmethod
def clear(cls):
shutil.rmtree(CACHE_PATH)
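# Illustrative flow (sketch; `download_to` is a hypothetical helper):
#
#   url = "s3://bucket/key?versionId=abc"
#   path = ObjectPathCache.get(url)      # None on a miss, or if the file changed
#   if path is None:
#       path = download_to(url)          # hypothetical
#       ObjectPathCache.set(url, path)   # records path plus (dev, inode, mtime)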
class PackageEntry:
"""
Represents an entry at a logical key inside a package.
"""
__slots__ = ['physical_key', 'size', 'hash', '_meta']
def __init__(self, physical_key, size, hash_obj, meta):
"""
Creates an entry.
Args:
physical_key: a URI (either `s3://` or `file://`)
size(number): size of object in bytes
hash({'type': string, 'value': string}): hash object
for example: {'type': 'SHA256', 'value': 'bb08a...'}
meta(dict): metadata dictionary
Returns:
a PackageEntry
"""
assert isinstance(physical_key, PhysicalKey)
self.physical_key = physical_key
self.size = size
self.hash = hash_obj
self._meta = meta or {}
def __eq__(self, other):
return (
# Don't check physical keys.
self.size == other.size
and self.hash == other.hash
and self._meta == other._meta
)
def __repr__(self):
return f"PackageEntry('{self.physical_key}')"
def as_dict(self):
"""
Returns dict representation of entry.
"""
return {
'physical_keys': [str(self.physical_key)],
'size': self.size,
'hash': self.hash,
'meta': self._meta
}
@property
def meta(self):
return self._meta.get('user_meta', dict())
def set_meta(self, meta):
"""
Sets the user_meta for this PackageEntry.
"""
self._meta['user_meta'] = meta
def _verify_hash(self, read_bytes):
"""
Verifies hash of bytes
"""
if self.hash is None:
raise QuiltException("Hash missing - need to build the package")
if self.hash.get('type') != 'SHA256':
raise NotImplementedError
digest = hashlib.sha256(read_bytes).hexdigest()
if digest != self.hash.get('value'):
raise QuiltException("Hash validation failed")
def set(self, path=None, meta=None):
"""
Returns self with the physical key set to path.
Args:
            path(string): new path for this entry.
                Currently only supports a path on local disk
            meta(dict): metadata dict to attach to entry. If meta is provided, set just
                updates the meta attached to the entry without changing anything
                else in the entry
Returns:
self
"""
if path is not None:
self.physical_key = PhysicalKey.from_url(fix_url(path))
self.size = None
self.hash = None
elif meta is not None:
self.set_meta(meta)
else:
raise PackageException('Must specify either path or meta')
def get(self):
"""
Returns the physical key of this PackageEntry.
"""
return str(self.physical_key)
def get_cached_path(self):
"""
Returns a locally cached physical key, if available.
"""
if not self.physical_key.is_local():
return ObjectPathCache.get(str(self.physical_key))
return None
def get_bytes(self, use_cache_if_available=True):
"""
Returns the bytes of the object this entry corresponds to. If 'use_cache_if_available'=True, will first try to
retrieve the bytes from cache.
"""
if use_cache_if_available:
cached_path = self.get_cached_path()
if cached_path is not None:
return get_bytes(PhysicalKey(None, cached_path, None))
data = get_bytes(self.physical_key)
return data
def get_as_json(self, use_cache_if_available=True):
"""
Returns a JSON file as a `dict`. Assumes that the file is encoded using utf-8.
If 'use_cache_if_available'=True, will first try to retrieve the object from cache.
"""
obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)
return json.loads(obj_bytes.decode("utf-8"))
def get_as_string(self, use_cache_if_available=True):
"""
Return the object as a string. Assumes that the file is encoded using utf-8.
If 'use_cache_if_available'=True, will first try to retrieve the object from cache.
"""
obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)
return obj_bytes.decode("utf-8")
def deserialize(self, func=None, **format_opts):
"""
Returns the object this entry corresponds to.
Args:
func: Skip normal deserialization process, and call func(bytes),
returning the result directly.
**format_opts: Some data formats may take options. Though
normally handled by metadata, these can be overridden here.
Returns:
The deserialized object from the logical_key
Raises:
physical key failure
hash verification fail
when deserialization metadata is not present
"""
data = get_bytes(self.physical_key)
if func is not None:
return func(data)
pkey_ext = pathlib.PurePosixPath(self.physical_key.path).suffix
# Verify format can be handled before checking hash. Raises if none found.
formats = FormatRegistry.search(None, self._meta, pkey_ext)
# Verify hash before deserializing..
self._verify_hash(data)
return formats[0].deserialize(data, self._meta, pkey_ext, **format_opts)
def fetch(self, dest=None):
"""
Gets objects from entry and saves them to dest.
Args:
dest: where to put the files
Defaults to the entry name
Returns:
None
"""
if dest is None:
name = self.physical_key.basename()
dest = PhysicalKey.from_path('.').join(name)
else:
dest = PhysicalKey.from_url(fix_url(dest))
copy_file(self.physical_key, dest)
        # Return a new entry whose physical key is rerooted to the copy
        # destination once the copy operation succeeds.
        # see GH#388 for context
return self.with_physical_key(dest)
def __call__(self, func=None, **kwargs):
"""
Shorthand for self.deserialize()
"""
return self.deserialize(func=func, **kwargs)
def with_physical_key(self, key):
return self.__class__(key, self.size, self.hash, self._meta)
@property
def physical_keys(self):
"""
Deprecated
"""
return [self.physical_key]
class Package:
""" In-memory representation of a package """
def __init__(self):
self._children = {}
self._meta = {'version': 'v0'}
@ApiTelemetry("package.__repr__")
def __repr__(self, max_lines=20):
"""
String representation of the Package.
"""
def _create_str(results_dict, level=0, parent=True):
"""
Creates a string from the results dict
"""
result = ''
keys = sorted(results_dict.keys())
if not keys:
return result
if parent:
has_remote_entries = any(
self._map(
lambda lk, entry: not entry.physical_key.is_local()
)
)
pkg_type = 'remote' if has_remote_entries else 'local'
result = f'({pkg_type} Package)\n'
for key in keys:
result += ' ' + (' ' * level) + '└─' + key + '\n'
result += _create_str(results_dict[key], level + 1, parent=False)
return result
if not self.keys():
return '(empty Package)'
# traverse the tree of package directories and entries to get the list of
# display objects. candidates is a deque of shape
        # ([logical_key, Package | PackageEntry], [list of parent keys])
candidates = deque(([x, []] for x in self._children.items()))
results_dict = {}
results_total = 0
more_objects_than_lines = False
while candidates:
[[logical_key, entry], parent_keys] = candidates.popleft()
if isinstance(entry, Package):
logical_key = logical_key + '/'
new_parent_keys = parent_keys.copy()
new_parent_keys.append(logical_key)
for child_key in sorted(entry.keys()):
candidates.append([[child_key, entry[child_key]], new_parent_keys])
current_result_level = results_dict
for key in parent_keys:
current_result_level = current_result_level[key]
current_result_level[logical_key] = {}
results_total += 1
if results_total >= max_lines:
more_objects_than_lines = True
break
repr_str = _create_str(results_dict)
        # append '...' if the package has more objects than max_lines
if more_objects_than_lines:
repr_str += ' ' + '...\n'
return repr_str
@property
def meta(self):
return self._meta.get('user_meta', dict())
@classmethod
@ApiTelemetry("package.install")
def install(cls, name, registry=None, top_hash=None, dest=None, dest_registry=None):
"""
Installs a named package to the local registry and downloads its files.
Args:
name(str): Name of package to install. It also can be passed as NAME/PATH,
in this case only the sub-package or the entry specified by PATH will
be downloaded.
registry(str): Registry where package is located.
Defaults to the default remote registry.
top_hash(str): Hash of package to install. Defaults to latest.
dest(str): Local path to download files to.
dest_registry(str): Registry to install package to. Defaults to local registry.
"""
if registry is None:
registry = get_from_config('default_remote_registry')
if registry is None:
raise QuiltException(
"No registry specified and no default_remote_registry configured. Please "
"specify a registry or configure a default remote registry with quilt3.config"
)
else:
registry = fix_url(registry)
registry_parsed = PhysicalKey.from_url(registry)
if dest_registry is None:
dest_registry = get_from_config('default_local_registry')
else:
dest_registry = fix_url(dest_registry)
dest_registry_parsed = PhysicalKey.from_url(dest_registry)
if not dest_registry_parsed.is_local():
raise QuiltException(
f"Can only 'install' to a local registry, but 'dest_registry' "
f"{dest_registry!r} is a remote path. To store a package in a remote "
f"registry, use 'push' or 'build' instead."
)
if dest is None:
dest_parsed = PhysicalKey.from_url(get_install_location()).join(name)
else:
dest_parsed = PhysicalKey.from_url(fix_url(dest))
if not dest_parsed.is_local():
raise QuiltException(
f"Invalid package destination path {dest!r}. 'dest', if set, must point at "
f"the local filesystem. To copy a package to a remote registry use 'push' or "
f"'build' instead."
)
parts = parse_sub_package_name(name)
if parts and parts[1]:
name, subpkg_key = parts
validate_key(subpkg_key)
else:
subpkg_key = None
pkg = cls._browse(name=name, registry=registry, top_hash=top_hash)
message = pkg._meta.get('message', None) # propagate the package message
file_list = []
if subpkg_key is not None:
if subpkg_key not in pkg:
raise QuiltException(f"Package {name} doesn't contain {subpkg_key!r}.")
entry = pkg[subpkg_key]
entries = entry.walk() if isinstance(entry, Package) else ((subpkg_key.split('/')[-1], entry),)
else:
entries = pkg.walk()
for logical_key, entry in entries:
# Copy the datafiles in the package.
physical_key = entry.physical_key
# Try a local cache.
cached_file = ObjectPathCache.get(str(physical_key))
if cached_file is not None:
physical_key = PhysicalKey.from_path(cached_file)
new_physical_key = dest_parsed.join(logical_key)
if physical_key != new_physical_key:
file_list.append((physical_key, new_physical_key, entry.size))
def _maybe_add_to_cache(old: PhysicalKey, new: PhysicalKey, _):
if not old.is_local() and new.is_local():
ObjectPathCache.set(str(old), new.path)
copy_file_list(file_list, callback=_maybe_add_to_cache, message="Copying objects")
pkg._build(name, registry=dest_registry, message=message)
if top_hash is None:
top_hash = pkg.top_hash
short_tophash = Package._shorten_tophash(name, dest_registry_parsed, top_hash)
print(f"Successfully installed package '{name}', tophash={short_tophash} from {registry}")
@classmethod
def resolve_hash(cls, registry, hash_prefix):
"""
Find a hash that starts with a given prefix.
Args:
registry(string): location of registry
hash_prefix(string): hash prefix with length between 6 and 64 characters
"""
assert isinstance(registry, PhysicalKey)
if len(hash_prefix) == 64:
top_hash = hash_prefix
elif 6 <= len(hash_prefix) < 64:
matching_hashes = [h for h, _
in list_url(registry.join('.quilt/packages/'))
if h.startswith(hash_prefix)]
if not matching_hashes:
raise QuiltException("Found zero matches for %r" % hash_prefix)
elif len(matching_hashes) > 1:
raise QuiltException("Found multiple matches: %r" % hash_prefix)
else:
top_hash = matching_hashes[0]
else:
raise QuiltException("Invalid hash: %r" % hash_prefix)
return top_hash
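    # Example (sketch): a prefix of 6..63 hex characters is matched against the
    # hashes under `.quilt/packages/`; a full 64-character hash is returned as-is.
    #
    #   Package.resolve_hash(PhysicalKey.from_url("s3://my-bucket"), "abc123")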
@classmethod
def _shorten_tophash(cls, package_name, registry: PhysicalKey, top_hash):
min_shorthash_len = 7
matches = [h for h, _ in list_url(registry.join('.quilt/packages/'))
if h.startswith(top_hash[:min_shorthash_len])]
if len(matches) == 0:
raise ValueError(f"Tophash {top_hash} was not found in registry {registry}")
for prefix_length in range(min_shorthash_len, 64):
potential_shorthash = top_hash[:prefix_length]
matches = [h for h in matches if h.startswith(potential_shorthash)]
if len(matches) == 1:
return potential_shorthash
@classmethod
@ApiTelemetry("package.browse")
def browse(cls, name, registry=None, top_hash=None):
"""
Load a package into memory from a registry without making a local copy of
the manifest.
Args:
name(string): name of package to load
registry(string): location of registry to load package from
top_hash(string): top hash of package version to load
"""
return cls._browse(name=name, registry=registry, top_hash=top_hash)
@classmethod
def _browse(cls, name, registry=None, top_hash=None):
validate_package_name(name)
if registry is None:
registry = get_from_config('default_local_registry')
else:
registry = fix_url(registry)
registry_parsed = PhysicalKey.from_url(registry)
if top_hash is None:
top_hash_file = registry_parsed.join(f'.quilt/named_packages/{name}/latest')
top_hash = get_bytes(top_hash_file).decode('utf-8').strip()
else:
top_hash = cls.resolve_hash(registry_parsed, top_hash)
# TODO: verify that name is correct with respect to this top_hash
pkg_manifest = registry_parsed.join(f'.quilt/packages/{top_hash}')
if pkg_manifest.is_local():
local_pkg_manifest = pkg_manifest.path
else:
local_pkg_manifest = CACHE_PATH / "manifest" / _filesystem_safe_encode(str(pkg_manifest))
if not local_pkg_manifest.exists():
# Copy to a temporary file first, to make sure we don't cache a truncated file
# if the download gets interrupted.
tmp_path = local_pkg_manifest.with_suffix('.tmp')
copy_file(pkg_manifest, PhysicalKey.from_path(tmp_path), message="Downloading manifest")
tmp_path.rename(local_pkg_manifest)
return cls._from_path(local_pkg_manifest)
@classmethod
def _from_path(cls, path):
""" Takes a path and returns a package loaded from that path"""
with open(path) as open_file:
pkg = cls._load(open_file)
return pkg
@classmethod
def _split_key(cls, logical_key):
"""
Converts a string logical key like 'a/b/c' into a list of ['a', 'b', 'c'].
Returns the original key if it's already a list or a tuple.
"""
if isinstance(logical_key, str):
path = logical_key.split('/')
elif isinstance(logical_key, (tuple, list)):
path = logical_key
else:
raise TypeError('Invalid logical_key: %r' % logical_key)
return path
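    # e.g. (sketch): _split_key("a/b/c") -> ["a", "b", "c"]; a list or tuple is
    # returned unchanged, and any other type raises TypeError.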
def __contains__(self, logical_key):
"""
Checks whether the package contains a specified logical_key.
Returns:
True or False
"""
try:
self[logical_key]
return True
except KeyError:
return False
def __getitem__(self, logical_key):
"""
Filters the package based on prefix, and returns either a new Package
or a PackageEntry.
Args:
prefix(str): prefix to filter on
Returns:
PackageEntry if prefix matches a logical_key exactly
otherwise Package
"""
pkg = self
for key_fragment in self._split_key(logical_key):
pkg = pkg._children[key_fragment]
return pkg
@ApiTelemetry("package.fetch")
def fetch(self, dest='./'):
"""
Copy all descendants to `dest`. Descendants are written under their logical
names _relative_ to self.
Args:
dest: where to put the files (locally)
Returns:
A new Package object with entries from self, but with physical keys
pointing to files in `dest`.
"""
nice_dest = PhysicalKey.from_url(fix_url(dest))
file_list = []
pkg = Package()
for logical_key, entry in self.walk():
physical_key = entry.physical_key
new_physical_key = nice_dest.join(logical_key)
file_list.append((physical_key, new_physical_key, entry.size))
            # Return a new package whose physical keys are rerooted to `dest`
            # after the copy operation succeeds.
            # see GH#388 for context
new_entry = entry.with_physical_key(new_physical_key)
pkg._set(logical_key, new_entry)
copy_file_list(file_list, message="Copying objects")
return pkg
def keys(self):
"""
Returns logical keys in the package.
"""
return self._children.keys()
def __iter__(self):
return iter(self._children)
def __len__(self):
return len(self._children)
def walk(self):
"""
Generator that traverses all entries in the package tree and returns tuples of (key, entry),
with keys in alphabetical order.
"""
for name, child in sorted(self._children.items()):
if isinstance(child, PackageEntry):
yield name, child
else:
for key, value in child.walk():
yield name + '/' + key, value
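    # Traversal sketch for a package containing entries "a/x" and "b":
    #
    #   >>> [lk for lk, _ in pkg.walk()]
    #   ['a/x', 'b']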
def _walk_dir_meta(self):
"""
Generator that traverses all entries in the package tree and returns
tuples of (key, meta) for each directory with metadata.
Keys will all end in '/' to indicate that they are directories.
"""
for key, child in sorted(self._children.items()):
if isinstance(child, PackageEntry):
continue
meta = child.meta
if meta:
yield key + '/', meta
for child_key, child_meta in child._walk_dir_meta():
yield key + '/' + child_key, child_meta
@classmethod
@ApiTelemetry("package.load")
def load(cls, readable_file):
"""
Loads a package from a readable file-like object.
Args:
readable_file: readable file-like object to deserialize package from
Returns:
A new Package object
Raises:
file not found
json decode error
invalid package exception
"""
return cls._load(readable_file=readable_file)
@classmethod
def _load(cls, readable_file):
gc.disable() # Experiments with COCO (650MB manifest) show disabling GC gives us ~2x performance improvement
try:
line_count = 0
for _ in readable_file:
line_count += 1
readable_file.seek(0)
reader = jsonlines.Reader(
tqdm(readable_file, desc="Loading manifest", total=line_count, unit="entries", disable=DISABLE_TQDM),
loads=json.loads,
)
meta = reader.read()
meta.pop('top_hash', None) # Obsolete as of PR #130
pkg = cls()
pkg._meta = meta
for obj in reader:
path = cls._split_key(obj.pop('logical_key'))
subpkg = pkg._ensure_subpackage(path[:-1])
key = path[-1]
if not obj.get('physical_keys', None):
# directory-level metadata
subpkg.set_meta(obj['meta'])
continue
if key in subpkg._children:
raise PackageException("Duplicate logical key while loading package")
subpkg._children[key] = PackageEntry(
PhysicalKey.from_url(obj['physical_keys'][0]),
obj['size'],
obj['hash'],
obj['meta'],
)
finally:
gc.enable()
return pkg
def set_dir(self, lkey, path=None, meta=None):
"""
Adds all files from `path` to the package.
Recursively enumerates every file in `path`, and adds them to
the package according to their relative location to `path`.
Args:
lkey(string): prefix to add to every logical key,
use '/' for the root of the package.
path(string): path to scan for files to add to package.
If None, lkey will be substituted in as the path.
meta(dict): user level metadata dict to attach to lkey directory entry.
Returns:
self
Raises:
When `path` doesn't exist
"""
lkey = lkey.strip("/")
if not lkey or lkey == '.' or lkey == './':
root = self
else:
validate_key(lkey)
root = self._ensure_subpackage(self._split_key(lkey))
root.set_meta(meta)
if path:
src = PhysicalKey.from_url(fix_url(path))
else:
src = PhysicalKey.from_path(lkey)
# TODO: deserialization metadata
if src.is_local():
src_path = pathlib.Path(src.path)
if not src_path.is_dir():
raise PackageException("The specified directory doesn't exist")
files = src_path.rglob('*')
ignore = src_path / '.quiltignore'
if ignore.exists():
files = quiltignore_filter(files, ignore, 'file')
for f in files:
if not f.is_file():
continue
entry = PackageEntry(PhysicalKey.from_path(f), f.stat().st_size, None, None)
logical_key = f.relative_to(src_path).as_posix()
root._set(logical_key, entry)
else:
if src.version_id is not None:
raise PackageException("Directories cannot have versions")
src_path = src.path
if src.basename() != '':
src_path += '/'
objects, _ = list_object_versions(src.bucket, src_path)
for obj in objects:
if not obj['IsLatest']:
continue
                # Skip S3 pseudo-directory files and keys that end in /
if obj['Key'].endswith('/'):
if obj['Size'] != 0:
warnings.warn(f'Logical keys cannot end in "/", skipping: {obj["Key"]}')
continue
obj_pk = PhysicalKey(src.bucket, obj['Key'], obj.get('VersionId'))
entry = PackageEntry(obj_pk, obj['Size'], None, None)
logical_key = obj['Key'][len(src_path):]
root._set(logical_key, entry)
return self
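    # Typical calls (sketch; the paths shown are hypothetical):
    #
    #   Package().set_dir("/", "./local_data")           # package root from a local dir
    #   Package().set_dir("raw", "s3://bucket/prefix/")  # an S3 prefix under "raw/"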
def get(self, logical_key):
"""
Gets object from logical_key and returns its physical path.
Equivalent to self[logical_key].get().
Args:
logical_key(string): logical key of the object to get
Returns:
Physical path as a string.
Raises:
KeyError: when logical_key is not present in the package
ValueError: if the logical_key points to a Package rather than PackageEntry.
"""
obj = self[logical_key]
if not isinstance(obj, PackageEntry):
raise ValueError("Key does not point to a PackageEntry")
return obj.get()
def readme(self):
"""
Returns the README PackageEntry
The README is the entry with the logical key 'README.md' (case-sensitive). Will raise a QuiltException if
no such entry exists.
"""
if "README.md" not in self:
ex_msg = "This Package is missing a README file. A Quilt recognized README file is a file named " \
"'README.md' (case-insensitive)"
raise QuiltException(ex_msg)
return self["README.md"]
def set_meta(self, meta):
"""
Sets user metadata on this Package.
"""
self._meta['user_meta'] = meta
return self
def _fix_sha256(self):
"""
Calculate and set missing hash values
"""
self._incomplete_entries = [entry for key, entry in self.walk() if entry.hash is None]
physical_keys = []
sizes = []
for entry in self._incomplete_entries:
physical_keys.append(entry.physical_key)
sizes.append(entry.size)
results = calculate_sha256(physical_keys, sizes)
exc = None
for entry, obj_hash in zip(self._incomplete_entries, results):
if isinstance(obj_hash, Exception):
exc = obj_hash
else:
entry.hash = dict(type='SHA256', value=obj_hash)
if exc:
incomplete_manifest_path = self._dump_manifest_to_scratch()
msg = "Unable to reach S3 for some hash values. Incomplete manifest saved to {path}."
raise PackageException(msg.format(path=incomplete_manifest_path)) from exc
def _set_commit_message(self, msg):
"""
Sets a commit message.
Args:
msg: a message string
Returns:
None
Raises:
a ValueError if msg is not a string
"""
if msg is not None and not isinstance(msg, str):
raise ValueError(
f"The package commit message must be a string, but the message provided is an "
f"instance of {type(msg)}."
)
self._meta.update({'message': msg})
def _dump_manifest_to_scratch(self):
registry = get_from_config('default_local_registry')
registry_parsed = PhysicalKey.from_url(registry)
pkg_manifest_file = registry_parsed.join("scratch").join(str(int(time.time())))
manifest = io.BytesIO()
self._dump(manifest)
put_bytes(
manifest.getvalue(),
pkg_manifest_file
)
return pkg_manifest_file.path
@ApiTelemetry("package.build")
def build(self, name, registry=None, message=None):
"""
Serializes this package to a registry.
Args:
name: optional name for package
registry: registry to build to
defaults to local registry
message: the commit message of the package
Returns:
The top hash as a string.
"""
return self._build(name=name, registry=registry, message=message)
def _build(self, name, registry, message):
validate_package_name(name)
if registry is None:
registry = get_from_config('default_local_registry')
else:
registry = fix_url(registry)
registry_parsed = PhysicalKey.from_url(registry)
self._set_commit_message(message)
self._fix_sha256()
manifest = io.BytesIO()
self._dump(manifest)
pkg_manifest_file = registry_parsed.join(f'.quilt/packages/{self.top_hash}')
put_bytes(
manifest.getvalue(),
pkg_manifest_file
)
named_path = registry_parsed.join(f'.quilt/named_packages/{name}')
hash_bytes = self.top_hash.encode('utf-8')
        # TODO: use a float-to-string formatter instead of double casting
timestamp_path = named_path.join(str(int(time.time())))
latest_path = named_path.join("latest")
put_bytes(hash_bytes, timestamp_path)
put_bytes(hash_bytes, latest_path)
return self
@ApiTelemetry("package.dump")
def dump(self, writable_file):
"""
Serializes this package to a writable file-like object.
Args:
writable_file: file-like object to write serialized package.
Returns:
None
Raises:
fail to create file
fail to finish write
"""
return self._dump(writable_file)
def _dump(self, writable_file):
writer = jsonlines.Writer(writable_file)
for line in self.manifest:
writer.write(line)
@property
def manifest(self):
"""
Provides a generator of the dicts that make up the serialized package.
"""
yield self._meta
for dir_key, meta in self._walk_dir_meta():
yield {'logical_key': dir_key, 'meta': meta}
for logical_key, entry in self.walk():
yield {'logical_key': logical_key, **entry.as_dict()}
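# Illustrative sketch (not part of the original file): each object yielded
# above serializes to one JSON line; exact fields follow as_dict(), roughly:
#   {"user_meta": {...}, "message": "..."}                       # package meta
#   {"logical_key": "dir/", "meta": {...}}                       # directory meta
#   {"logical_key": "dir/file.csv", "physical_keys": [...], "size": 42, "hash": {...}, "meta": {...}}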
def set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None):
"""
Returns self with the object at logical_key set to entry.
Args:
logical_key(string): logical key to update
entry(PackageEntry OR string OR object): new entry to place at logical_key in the package.
If entry is a string, it is treated as a URL, and an entry is created based on it.
If entry is None, the logical key string will be substituted as the entry value.
If entry is an object and quilt knows how to serialize it, it will immediately be serialized and
written to disk, either to serialization_location or to a location managed by quilt. List of types that
Quilt can serialize is available by calling `quilt3.formats.FormatRegistry.all_supported_formats()`
meta(dict): user level metadata dict to attach to entry
serialization_format_opts(dict): Optional. If passed in, only used if entry is an object. Options to help
Quilt understand how the object should be serialized. Useful for underspecified file formats like csv
when content contains confusing characters. Will be passed as kwargs to the FormatHandler.serialize()
function. See docstrings for individual FormatHandlers for full list of options -
https://github.com/quiltdata/quilt/blob/master/api/python/quilt3/formats.py
serialization_location(string): Optional. If passed in, only used if entry is an object. Where the
serialized object should be written, e.g. "./mydataframe.parquet"
Returns:
self
"""
return self._set(logical_key=logical_key,
entry=entry,
meta=meta,
serialization_location=serialization_location,
serialization_format_opts=serialization_format_opts)
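# Minimal usage sketch (assumed; file names are hypothetical):
#   pkg = Package()
#   pkg.set("data.csv")                            # local path inferred from the logical key
#   pkg.set("data.csv", "s3://bucket/data.csv")    # entry from a URL
#   pkg.set("frame.parquet", df, meta={"k": "v"})  # serialize a supported object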
def _set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None):
if not logical_key or logical_key.endswith('/'):
raise QuiltException(
f"Invalid logical key {logical_key!r}. "
f"A package entry logical key cannot be a directory."
)
validate_key(logical_key)
if entry is None:
entry = pathlib.Path(logical_key).resolve().as_uri()
if isinstance(entry, (str, os.PathLike)):
src = PhysicalKey.from_url(fix_url(str(entry)))
size, version_id = get_size_and_version(src)
# Determine if a new version needs to be appended.
if not src.is_local() and src.version_id is None and version_id is not None:
src.version_id = version_id
entry = PackageEntry(src, size, None, None)
elif isinstance(entry, PackageEntry):
assert meta is None
elif FormatRegistry.object_is_serializable(entry):
# Use file extension from serialization_location, fall back to file extension from logical_key
# If neither has a file extension, Quilt picks the serialization format.
logical_key_ext = extract_file_extension(logical_key)
serialize_loc_ext = None
if serialization_location is not None:
serialize_loc_ext = extract_file_extension(serialization_location)
if logical_key_ext is not None and serialize_loc_ext is not None:
assert logical_key_ext == serialize_loc_ext, f"The logical_key and the serialization_location have " \
f"different file extensions: {logical_key_ext} vs " \
f"{serialize_loc_ext}. Quilt doesn't know which to use!"
if serialize_loc_ext is not None:
ext = serialize_loc_ext
elif logical_key_ext is not None:
ext = logical_key_ext
else:
ext = None
format_handlers = FormatRegistry.search(type(entry))
if ext:
format_handlers = [f for f in format_handlers if ext in f.handled_extensions]
if len(format_handlers) == 0:
error_message = f'Quilt does not know how to serialize a {type(entry)}'
if ext is not None:
error_message += f' as a {ext} file'
error_message += '. If you think this should be supported, please open an issue or PR at ' \
'https://github.com/quiltdata/quilt'
raise QuiltException(error_message)
if serialization_format_opts is None:
serialization_format_opts = {}
serialized_object_bytes, new_meta = format_handlers[0].serialize(entry, meta=None, ext=ext,
**serialization_format_opts)
if serialization_location is None:
serialization_path = APP_DIR_TEMPFILE_DIR / str(uuid.uuid4())
if ext:
serialization_path = serialization_path.with_suffix(f'.{ext}')
else:
serialization_path = pathlib.Path(serialization_location).expanduser().resolve()
serialization_path.parent.mkdir(exist_ok=True, parents=True)
serialization_path.write_bytes(serialized_object_bytes)
size = serialization_path.stat().st_size
write_pk = PhysicalKey.from_path(serialization_path)
entry = PackageEntry(write_pk, size, hash_obj=None, meta=new_meta)
else:
raise TypeError(f"Expected a string for entry, but got an instance of {type(entry)}.")
if meta is not None:
entry.set_meta(meta)
path = self._split_key(logical_key)
pkg = self._ensure_subpackage(path[:-1], ensure_no_entry=True)
if path[-1] in pkg and isinstance(pkg[path[-1]], Package):
raise QuiltException("Cannot overwrite directory with PackageEntry")
pkg._children[path[-1]] = entry
return self
def _ensure_subpackage(self, path, ensure_no_entry=False):
"""
Creates a package and any intermediate packages at the given path.
Args:
path(list): logical key as a list or tuple
ensure_no_entry(boolean): if True, throws if this would overwrite
a PackageEntry that already exists in the tree.
Returns:
newly created or existing package at that path
"""
pkg = self
for key_fragment in path:
if ensure_no_entry and key_fragment in pkg \
and isinstance(pkg[key_fragment], PackageEntry):
raise QuiltException("Already a PackageEntry along the path.")
pkg = pkg._children.setdefault(key_fragment, Package())
return pkg
def delete(self, logical_key):
"""
Returns the package with logical_key removed.
Returns:
self
Raises:
KeyError: when logical_key is not present to be deleted
"""
path = self._split_key(logical_key)
pkg = self[path[:-1]]
del pkg._children[path[-1]]
return self
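# Hedged example (assumed): remove one entry by logical key.
#   pkg.delete("dir/file.csv")  # raises KeyError if the key is absent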
@property
def top_hash(self):
"""
Returns the top hash of the package.
Note that physical keys are not hashed because the package has
the same semantics regardless of where the bytes come from.
Returns:
A string that represents the top hash of the package
"""
top_hash = hashlib.sha256()
assert 'top_hash' not in self._meta
top_meta = json.dumps(self._meta, sort_keys=True, separators=(',', ':'))
top_hash.update(top_meta.encode('utf-8'))
for logical_key, entry in self.walk():
if entry.hash is None or entry.size is None:
raise QuiltException(
"PackageEntry missing hash and/or size: %s" % entry.physical_key
)
entry_dict = entry.as_dict()
entry_dict['logical_key'] = logical_key
entry_dict.pop('physical_keys', None)
entry_dict_str = json.dumps(entry_dict, sort_keys=True, separators=(',', ':'))
top_hash.update(entry_dict_str.encode('utf-8'))
return top_hash.hexdigest()
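# Consequence sketch (assumption, following the loop above): physical keys
# are popped before hashing, so two packages that differ only in where the
# bytes live share a top hash:
#   pkg_a["f"].physical_key != pkg_b["f"].physical_key
#   pkg_a.top_hash == pkg_b.top_hash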
@ApiTelemetry("package.push")
def push(self, name, registry=None, dest=None, message=None, selector_fn=None):
"""
Copies objects to path, then creates a new package that points to those objects.
Copies each object in this package to path according to logical key structure,
then adds to the registry a serialized version of this package with
physical keys that point to the new copies.
Note that push is careful to not push data unnecessarily. To illustrate, imagine you have
a PackageEntry: `pkg["entry_1"].physical_key = "/tmp/package_entry_1.json"`
If that entry would be pushed to `s3://bucket/prefix/entry_1.json`, but
`s3://bucket/prefix/entry_1.json` already contains the exact same bytes as
'/tmp/package_entry_1.json', `quilt3` will not push the bytes to s3, no matter what
`selector_fn('entry_1', pkg["entry_1"])` returns.
However, selector_fn will dictate whether the new package points to the local file or to s3:
If `selector_fn('entry_1', pkg["entry_1"]) == False`,
`new_pkg["entry_1"] = ["/tmp/package_entry_1.json"]`
If `selector_fn('entry_1', pkg["entry_1"]) == True`,
`new_pkg["entry_1"] = ["s3://bucket/prefix/entry_1.json"]`
Args:
name: name for package in registry
dest: where to copy the objects in the package
registry: registry where to create the new package
message: the commit message for the new package
selector_fn: An optional function that determines which package entries should be copied to S3.
The function takes in two arguments, logical_key and package_entry, and should return False if that
PackageEntry should be skipped during push. If for example you have a package where the files
are spread over multiple buckets and you add a single local file, you can use selector_fn to
only push the local file to s3 (instead of pushing all data to the destination bucket).
Returns:
A new package that points to the copied objects.
"""
if selector_fn is None:
def selector_fn(*args):
return True
validate_package_name(name)
if registry is None:
registry = get_from_config('default_remote_registry')
if registry is None:
raise QuiltException(
"No registry specified and no default remote registry configured. Please "
"specify a registry or configure a default remote registry with quilt3.config"
)
registry_parsed = PhysicalKey.from_url(fix_url(registry))
else:
registry_parsed = PhysicalKey.from_url(fix_url(registry))
if not registry_parsed.is_local():
if registry_parsed.path != '':
raise QuiltException(
f"The 'registry' argument expects an S3 bucket but the S3 object path "
f"{registry!r} was provided instead. You probably wanted to set "
f"'registry' to {'s3://' + registry_parsed.bucket!r} instead. To specify that package "
f"data land in a specific directory use 'dest'."
)
else:
raise QuiltException(
f"Can only 'push' to remote registries in S3, but {registry!r} "
f"is a local file. To store a package in the local registry, use "
f"'build' instead."
)
if dest is None:
dest_parsed = registry_parsed.join(name)
else:
dest_parsed = PhysicalKey.from_url(fix_url(dest))
if dest_parsed.bucket != registry_parsed.bucket:
raise QuiltException(
f"Invalid package destination path {dest!r}. 'dest', if set, must be a path "
f"in the {registry!r} package registry specified by 'registry'."
)
self._fix_sha256()
pkg = self.__class__()
pkg._meta = self._meta
# Since all that is modified is physical keys, pkg will have the same top hash
file_list = []
entries = []
for logical_key, entry in self.walk():
if not selector_fn(logical_key, entry):
pkg._set(logical_key, entry)
continue
# Copy the datafiles in the package.
physical_key = entry.physical_key
new_physical_key = dest_parsed.join(logical_key)
if (
physical_key.bucket == new_physical_key.bucket and
physical_key.path == new_physical_key.path
):
# No need to copy - re-use the original physical key.
pkg._set(logical_key, entry)
else:
entries.append((logical_key, entry))
file_list.append((physical_key, new_physical_key, entry.size))
results = copy_file_list(file_list, message="Copying objects")
for (logical_key, entry), versioned_key in zip(entries, results):
# Create a new package entry pointing to the new remote key.
assert versioned_key is not None
new_entry = entry.with_physical_key(versioned_key)
pkg._set(logical_key, new_entry)
def physical_key_is_temp_file(pk):
if not pk.is_local():
return False
return pathlib.Path(pk.path).parent == APP_DIR_TEMPFILE_DIR
temp_file_logical_keys = [lk for lk, entry in self.walk() if physical_key_is_temp_file(entry.physical_key)]
temp_file_physical_keys = [self[lk].physical_key for lk in temp_file_logical_keys]
# Now that data has been pushed, delete tmp files created by pkg.set('KEY', obj)
with Pool(10) as p:
p.map(_delete_local_physical_key, temp_file_physical_keys)
# Update old package to point to the materialized location of the file since the tempfile no longer exists
for lk in temp_file_logical_keys:
self._set(lk, pkg[lk])
pkg._build(name, registry=registry, message=message)
shorthash = Package._shorten_tophash(name, PhysicalKey.from_url(registry), pkg.top_hash)
print(f"Package {name}@{shorthash} pushed to s3://{dest_parsed.bucket}")
if user_is_configured_to_custom_stack():
navigator_url = get_from_config("navigator_url")
print(f"Successfully pushed the new package to "
f"{catalog_package_url(navigator_url, dest_parsed.bucket, name)}")
else:
dest_s3_url = str(dest_parsed)
if not dest_s3_url.endswith("/"):
dest_s3_url += "/"
print(f"Run `quilt3 catalog {dest_s3_url}` to browse.")
print("Successfully pushed the new package")
return pkg
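# Minimal usage sketch (assumed; the bucket name is hypothetical):
#   pkg.push(
#       "user/pkg",
#       registry="s3://example-bucket",
#       selector_fn=lambda lk, entry: entry.physical_key.is_local(),
#   )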
@classmethod
def rollback(cls, name, registry, top_hash):
"""
Set the "latest" version to the given hash.
Args:
name(str): Name of package to rollback.
registry(str): Registry where package is located.
top_hash(str): Hash to rollback to.
"""
registry = PhysicalKey.from_url(fix_url(registry))
validate_package_name(name)
top_hash = cls.resolve_hash(registry, top_hash)
hash_path = registry.join(f'.quilt/packages/{top_hash}')
latest_path = registry.join(f'.quilt/named_packages/{name}/latest')
# Check that both latest and top_hash actually exist.
get_size_and_version(hash_path)
get_size_and_version(latest_path)
put_bytes(top_hash.encode('utf-8'), latest_path)
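# Hedged example (assumed; the hash is hypothetical and may be a short
# prefix, since resolve_hash is called above):
#   Package.rollback("user/pkg", "s3://example-bucket", "abc123")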
@ApiTelemetry("package.diff")
def diff(self, other_pkg):
"""
Returns three lists -- added, modified, deleted.
Added: present in other_pkg but not in self.
Modified: present in both, but different.
Deleted: present in self, but not other_pkg.
Args:
other_pkg: Package to diff
Returns:
added, modified, deleted (all lists of logical keys)
"""
deleted = []
modified = []
other_entries = dict(other_pkg.walk())
for lk, entry in self.walk():
other_entry = other_entries.pop(lk, None)
if other_entry is None:
deleted.append(lk)
elif entry != other_entry:
modified.append(lk)
added = sorted(other_entries)
return added, modified, deleted
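# Usage sketch (assumed):
#   added, modified, deleted = old_pkg.diff(new_pkg)
#   # in new_pkg only -> added; changed entries -> modified; in old_pkg only -> deleted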
@ApiTelemetry("package.map")
def map(self, f, include_directories=False):
"""
Performs a user-specified operation on each entry in the package.
Args:
f(x, y): function
The function to be applied to each package entry.
It should take two inputs, a logical key and a PackageEntry.
include_directories: bool
Whether or not to include directory entries in the map.
Returns: generator
The results of applying f to each package entry.
"""
return self._map(f, include_directories=include_directories)
def _map(self, f, include_directories=False):
if include_directories:
for lk, _ in self._walk_dir_meta():
yield f(lk, self[lk.rstrip("/")])
for lk, entity in self.walk():
yield f(lk, entity)
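# Usage sketch (assumed): _map yields lazily, so materialize if needed.
#   sizes = list(pkg.map(lambda lk, entry: (lk, entry.size)))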
@ApiTelemetry("package.filter")
def filter(self, f, include_directories=False):
"""
Applies a user-specified operation to each entry in the package,
removing results that evaluate to False from the output.
Args:
f(x, y): function
The function to be applied to each package entry.
It should take two inputs, a logical key and a PackageEntry.
This function should return a boolean.
include_directories: bool
Whether or not to include directory entries in the map.
Returns:
A new package with entries that evaluated to False removed
"""
return self._filter(f=f, include_directories=include_directories)
def _filter(self, f, include_directories=False):
p = Package()
excluded_dirs = set()
if include_directories:
for lk, _ in self._walk_dir_meta():
if not f(lk, self[lk.rstrip("/")]):
excluded_dirs.add(lk)
for lk, entity in self.walk():
if (not any(p in excluded_dirs
for p in pathlib.PurePosixPath(lk).parents)
and f(lk, entity)):
p._set(lk, entity)
return p
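# Usage sketch (assumed): keep only CSV entries.
#   csv_pkg = pkg.filter(lambda lk, entry: lk.endswith(".csv"))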
def verify(self, src, extra_files_ok=False):
"""
Check if the contents of the given directory matches the package manifest.
Args:
src(str): URL of the directory
extra_files_ok(bool): Whether extra files in the directory are allowed (if False, extra files cause verification to fail).
Returns:
True if the package matches the directory; False otherwise.
"""
src = PhysicalKey.from_url(fix_url(src))
src_dict = dict(list_url(src))
url_list = []
size_list = []
for logical_key, entry in self.walk():
src_size = src_dict.pop(logical_key, None)
if src_size is None:
return False
if entry.size != src_size:
return False
entry_url = src.join(logical_key)
url_list.append(entry_url)
size_list.append(src_size)
if src_dict and not extra_files_ok:
return False
hash_list = calculate_sha256(url_list, size_list)
for (logical_key, entry), url_hash in zip(self.walk(), hash_list):
if isinstance(url_hash, Exception):
raise url_hash
if entry.hash['value'] != url_hash:
return False
return True
| 1 | 18,668 | we already have .meta() so I don't think we want get_meta() for PackageEntry | quiltdata-quilt | py |
@@ -83,7 +83,8 @@ public abstract class CoprocessorIterator<T> implements Iterator<T> {
session,
SchemaInfer.create(dagRequest),
dagRequest.getPushDownType(),
- dagRequest.getStoreType()) {
+ dagRequest.getStoreType(),
+ dagRequest.getStartTs().getVersion()) {
@Override
public Row next() {
return rowReader.readRow(schemaInfer.getTypes().toArray(new DataType[0])); | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.operation.iterator;
import static java.util.Objects.requireNonNull;
import com.pingcap.tidb.tipb.Chunk;
import com.pingcap.tidb.tipb.DAGRequest;
import com.pingcap.tidb.tipb.EncodeType;
import com.pingcap.tikv.TiSession;
import com.pingcap.tikv.codec.CodecDataInput;
import com.pingcap.tikv.columnar.BatchedTiChunkColumnVector;
import com.pingcap.tikv.columnar.TiChunk;
import com.pingcap.tikv.columnar.TiChunkColumnVector;
import com.pingcap.tikv.columnar.TiColumnVector;
import com.pingcap.tikv.columnar.TiRowColumnVector;
import com.pingcap.tikv.meta.TiDAGRequest;
import com.pingcap.tikv.operation.SchemaInfer;
import com.pingcap.tikv.row.Row;
import com.pingcap.tikv.row.RowReader;
import com.pingcap.tikv.row.RowReaderFactory;
import com.pingcap.tikv.types.DataType;
import com.pingcap.tikv.util.RangeSplitter.RegionTask;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
public abstract class CoprocessorIterator<T> implements Iterator<T> {
protected final TiSession session;
protected final List<RegionTask> regionTasks;
protected final DAGRequest dagRequest;
protected final DataType[] handleTypes;
protected RowReader rowReader;
protected CodecDataInput dataInput;
protected boolean eof = false;
protected int taskIndex;
protected int chunkIndex;
protected List<Chunk> chunkList;
protected SchemaInfer schemaInfer;
CoprocessorIterator(
DAGRequest req, List<RegionTask> regionTasks, TiSession session, SchemaInfer infer) {
this.dagRequest = req;
this.session = session;
this.regionTasks = regionTasks;
this.schemaInfer = infer;
this.handleTypes = infer.getTypes().toArray(new DataType[] {});
}
abstract void submitTasks();
/**
* Build a DAGIterator from TiDAGRequest and region tasks to get rows
*
* <p>When we are performing a scan request using coveringIndex, {@link
* com.pingcap.tidb.tipb.IndexScan} should be used to read index rows. In other circumstances,
* {@link com.pingcap.tidb.tipb.TableScan} is used to scan table rows.
*
* @param req TiDAGRequest built
* @param regionTasks a list of RegionTasks, each containing a task on a single region
* @param session TiSession
* @return a DAGIterator to be processed
*/
public static CoprocessorIterator<Row> getRowIterator(
TiDAGRequest req, List<RegionTask> regionTasks, TiSession session) {
TiDAGRequest dagRequest = req.copy();
return new DAGIterator<Row>(
dagRequest.buildTableScan(),
regionTasks,
session,
SchemaInfer.create(dagRequest),
dagRequest.getPushDownType(),
dagRequest.getStoreType()) {
@Override
public Row next() {
return rowReader.readRow(schemaInfer.getTypes().toArray(new DataType[0]));
}
};
}
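// Illustrative usage sketch (not part of the original source; variable
// names are assumptions):
//   CoprocessorIterator<Row> rows =
//       CoprocessorIterator.getRowIterator(dagRequest, regionTasks, session);
//   while (rows.hasNext()) {
//     Row row = rows.next();
//   }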
/**
* Build a DAGIterator from TiDAGRequest and region tasks to get rows
*
* <p>When we are performing a scan request using coveringIndex, {@link
* com.pingcap.tidb.tipb.IndexScan} should be used to read index rows. In other circumstances,
* {@link com.pingcap.tidb.tipb.TableScan} is used to scan table rows.
*
* @param req TiDAGRequest built
* @param regionTasks a list of RegionTasks, each containing a task on a single region
* @param session TiSession
* @return a DAGIterator to be processed
*/
public static CoprocessorIterator<TiChunk> getTiChunkIterator(
TiDAGRequest req, List<RegionTask> regionTasks, TiSession session, int numOfRows) {
TiDAGRequest dagRequest = req.copy();
return new DAGIterator<TiChunk>(
dagRequest.buildTableScan(),
regionTasks,
session,
SchemaInfer.create(dagRequest),
dagRequest.getPushDownType(),
dagRequest.getStoreType()) {
@Override
public TiChunk next() {
DataType[] dataTypes = this.schemaInfer.getTypes().toArray(new DataType[0]);
// TODO tiColumnarBatch is meant to be reused in the entire data loading process.
if (this.encodeType == EncodeType.TypeDefault) {
Row[] rows = new Row[numOfRows];
int count = 0;
for (int i = 0; i < rows.length && hasNext(); i++) {
rows[i] = rowReader.readRow(dataTypes);
count += 1;
}
TiRowColumnVector[] columnarVectors = new TiRowColumnVector[dataTypes.length];
for (int i = 0; i < dataTypes.length; i++) {
columnarVectors[i] = new TiRowColumnVector(dataTypes[i], i, rows, count);
}
return new TiChunk(columnarVectors);
} else {
TiColumnVector[] columnarVectors = new TiColumnVector[dataTypes.length];
List<List<TiChunkColumnVector>> childColumnVectors = new ArrayList<>();
for (int i = 0; i < dataTypes.length; i++) {
childColumnVectors.add(new ArrayList<>());
}
int count = 0;
// hasNext will create a dataInput which is our data source.
// TODO(Zhexuan Yang) we need to control the memory limit in case of out-of-memory errors
for (; count < numOfRows && hasNext(); ) {
for (int i = 0; i < dataTypes.length; i++) {
childColumnVectors.get(i).add(dataTypes[i].decodeColumn(dataInput));
}
int size = childColumnVectors.get(0).size();
count += childColumnVectors.get(0).get(size - 1).numOfRows();
// leftover data should be discarded.
dataInput = new CodecDataInput(new byte[0]);
}
for (int i = 0; i < dataTypes.length; i++) {
columnarVectors[i] = new BatchedTiChunkColumnVector(childColumnVectors.get(i), count);
}
return new TiChunk(columnarVectors);
}
}
};
}
/**
* Build a DAGIterator from TiDAGRequest and region tasks to get handles
*
* <p>When we use getHandleIterator, we must be performing an IndexScan.
*
* @param req TiDAGRequest built
* @param regionTasks a list of RegionTasks, each containing a task on a single region
* @param session TiSession
* @return a DAGIterator to be processed
*/
public static CoprocessorIterator<Long> getHandleIterator(
TiDAGRequest req, List<RegionTask> regionTasks, TiSession session) {
return new DAGIterator<Long>(
req.buildIndexScan(),
regionTasks,
session,
SchemaInfer.create(req, true),
req.getPushDownType(),
req.getStoreType()) {
@Override
public Long next() {
return rowReader.readRow(handleTypes).getLong(handleTypes.length - 1);
}
};
}
boolean tryAdvanceChunkIndex() {
if (chunkList == null || chunkIndex >= chunkList.size() - 1) {
return false;
}
chunkIndex++;
return true;
}
void createDataInputReader() {
requireNonNull(chunkList, "Chunk list should not be null.");
if (0 > chunkIndex || chunkIndex >= chunkList.size()) {
throw new IllegalArgumentException();
}
dataInput = new CodecDataInput(chunkList.get(chunkIndex).getRowsData());
rowReader = RowReaderFactory.createRowReader(dataInput);
}
}
| 1 | 12,028 | question: can we always get the startts from dagrequest? | pingcap-tispark | java |
@@ -114,9 +114,11 @@ describe( 'AccountSelect', () => {
it( 'should pre-select the property and profile IDs when changed', () => {
const { accounts, properties, profiles } = fixtures.accountsPropertiesProfiles;
const { getByText, container, registry } = render( <AccountSelect />, { setupRegistry } );
+ const propertyID = fixtures.accountsPropertiesProfiles.properties[ 0 ].id;
+ const accountID = fixtures.accountsPropertiesProfiles.properties[ 0 ].accountId;
- registry.dispatch( STORE_NAME ).receiveProperties( properties );
- registry.dispatch( STORE_NAME ).receiveProfiles( profiles );
+ registry.dispatch( STORE_NAME ).receiveProperties( properties, { accountID } );
+ registry.dispatch( STORE_NAME ).receiveProfiles( profiles, { propertyID } );
// Click the label to expose the elements in the menu.
fireEvent.click( container.querySelector( '.mdc-floating-label' ) ); | 1 | /**
* Account Select component tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import apiFetchMock from '@wordpress/api-fetch';
/**
* Internal dependencies
*/
import AccountSelect from './account-select';
import { fireEvent, muteConsole, render } from '../../../../../tests/js/test-utils';
import { STORE_NAME, ACCOUNT_CREATE } from '../datastore/constants';
import * as fixtures from '../datastore/__fixtures__';
// Mock apiFetch so we know if it's called.
jest.mock( '@wordpress/api-fetch' );
apiFetchMock.mockImplementation( ( ...args ) => {
// eslint-disable-next-line no-console
console.warn( 'apiFetch', ...args );
} );
const setupRegistry = ( registry ) => {
registry.dispatch( STORE_NAME ).setSettings( {} );
registry.dispatch( STORE_NAME ).receiveAccounts( fixtures.accountsPropertiesProfiles.accounts );
registry.dispatch( STORE_NAME ).receiveExistingTag( null );
};
const setupLoadingRegistry = ( registry ) => {
registry.dispatch( STORE_NAME ).setSettings( {} );
registry.dispatch( STORE_NAME ).receiveExistingTag( null );
};
const setupEmptyRegistry = ( registry ) => {
registry.dispatch( STORE_NAME ).setSettings( {} );
registry.dispatch( STORE_NAME ).receiveAccounts( [] );
registry.dispatch( STORE_NAME ).receiveExistingTag( null );
};
describe( 'AccountSelect', () => {
afterEach( () => apiFetchMock.mockClear() );
afterAll( () => jest.restoreAllMocks() );
it( 'should render an option for each analytics account', async () => {
const { getAllByRole } = render( <AccountSelect />, { setupRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
// Note: we do length + 1 here because there should also be an item for
// "Set up a new account".
expect( listItems ).toHaveLength( fixtures.accountsPropertiesProfiles.accounts.length + 1 );
expect( apiFetchMock ).not.toHaveBeenCalled();
} );
it( 'should have a "Set up a new account" item at the end of the list', async () => {
const { getAllByRole } = render( <AccountSelect />, { setupRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems[ listItems.length - 1 ].textContent ).toMatch( /set up a new account/i );
expect( apiFetchMock ).not.toHaveBeenCalled();
} );
it( 'should render a loading state when accounts are undefined', async () => {
muteConsole( 'warn' );
const { queryAllByRole, queryByRole } = render( <AccountSelect />, { setupRegistry: setupLoadingRegistry } );
expect( queryAllByRole( 'menuitem', { hidden: true } ) ).toHaveLength( 0 );
expect( queryByRole( 'progressbar' ) ).toBeInTheDocument();
// If accounts are `undefined`, we'll make a request to fetch them.
expect( apiFetchMock ).toHaveBeenCalled();
} );
it( 'should render a select box with only setup when no accounts exist', async () => {
const { getAllByRole } = render( <AccountSelect />, { setupRegistry: setupEmptyRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems ).toHaveLength( 1 );
expect( listItems[ listItems.length - 1 ].textContent ).toMatch( /set up a new account/i );
expect( apiFetchMock ).not.toHaveBeenCalled();
} );
it( 'should update accountID in the store when a new item is clicked', async () => {
const { getByText, container, registry } = render( <AccountSelect />, { setupRegistry } );
const originalAccountID = registry.select( STORE_NAME ).getAccountID();
// Click the label to expose the elements in the menu.
fireEvent.click( container.querySelector( '.mdc-floating-label' ) );
// Click this element to select it and fire the onChange event.
fireEvent.click( getByText( /set up a new account/i ) );
// Note: we use the new account option here to avoid querying properties and profiles,
// as these are pre-selected when the account is changed (see the next test).
const newAccountID = registry.select( STORE_NAME ).getAccountID();
expect( originalAccountID ).not.toEqual( newAccountID );
expect( newAccountID ).toEqual( ACCOUNT_CREATE );
} );
it( 'should pre-select the property and profile IDs when changed', () => {
const { accounts, properties, profiles } = fixtures.accountsPropertiesProfiles;
const { getByText, container, registry } = render( <AccountSelect />, { setupRegistry } );
registry.dispatch( STORE_NAME ).receiveProperties( properties );
registry.dispatch( STORE_NAME ).receiveProfiles( profiles );
// Click the label to expose the elements in the menu.
fireEvent.click( container.querySelector( '.mdc-floating-label' ) );
// Click this element to select it and fire the onChange event.
const account = accounts.find( ( acct ) => acct.id === properties[ 0 ].accountId );
fireEvent.click( getByText( account.name ) );
const newPropertyID = registry.select( STORE_NAME ).getPropertyID();
const newWebPropertyID = registry.select( STORE_NAME ).getInternalWebPropertyID();
const newProfileID = registry.select( STORE_NAME ).getProfileID();
expect( newPropertyID ).not.toBeFalsy();
expect( newWebPropertyID ).not.toBeFalsy();
expect( newProfileID ).not.toBeFalsy();
} );
} );
| 1 | 28,513 | Nit-picking, but this can be simplified to reference `properties[ 0 ]` since that is already retrieved 2 lines above. | google-site-kit-wp | js |
@@ -142,6 +142,10 @@ module Bolt
@unresolved_targets[t_name] = target
end
+ def remove_target(target)
+ @resolved_targets.delete(target.name)
+ end
+
def add_target(target)
@resolved_targets[target.name] = { 'name' => target.name }
end | 1 | # frozen_string_literal: true
require 'bolt/inventory/group'
require 'bolt/inventory/inventory2'
require 'bolt/inventory/target'
module Bolt
class Inventory
class Group2
attr_accessor :name, :groups
# Regex used to validate group names and target aliases.
NAME_REGEX = /\A[a-z0-9_][a-z0-9_-]*\Z/.freeze
DATA_KEYS = %w[config facts vars features plugin_hooks].freeze
TARGET_KEYS = DATA_KEYS + %w[name alias uri]
GROUP_KEYS = DATA_KEYS + %w[name groups targets]
CONFIG_KEYS = Bolt::TRANSPORTS.keys.map(&:to_s) + ['transport']
def initialize(input, plugins)
@logger = Logging.logger[self]
@plugins = plugins
input = @plugins.resolve_top_level_references(input) if @plugins.reference?(input)
raise ValidationError.new("Group does not have a name", nil) unless input.key?('name')
@name = @plugins.resolve_references(input['name'])
raise ValidationError.new("Group name must be a String, not #{@name.inspect}", nil) unless @name.is_a?(String)
raise ValidationError.new("Invalid group name #{@name}", @name) unless @name =~ NAME_REGEX
validate_group_input(input)
@input = input
validate_data_keys(@input)
targets = @plugins.resolve_top_level_references(input.fetch('targets', []))
@unresolved_targets = {}
@resolved_targets = {}
@aliases = {}
@string_targets = []
Array(targets).each do |target|
# If target is a string, it can either be trivially defining a target
# or it could be a name/alias of a target defined in another group.
# We can't tell the difference until all groups have been resolved,
# so we store the string on its own here and process it later.
if target.is_a?(String)
@string_targets << target
# Handle plugins at this level so that lookups cannot trigger recursive lookups
elsif target.is_a?(Hash)
add_target_definition(target)
else
raise ValidationError.new("Target entry must be a String or Hash, not #{target.class}", @name)
end
end
groups = input.fetch('groups', [])
# 'groups' can be a _plugin reference, in which case we want to resolve
# it. That can itself return a reference, so we want to keep resolving
# them until we have a value. We don't just use resolve_references
# though, since that will resolve any nested references and we want to
# leave it to the group to do that lazily.
groups = @plugins.resolve_top_level_references(groups)
@groups = Array(groups).map { |g| Group2.new(g, plugins) }
end
def target_data(target_name)
if @unresolved_targets.key?(target_name)
target = @unresolved_targets.delete(target_name)
resolved_data = resolve_data_keys(target, target_name).merge(
'name' => target['name'],
'uri' => target['uri'],
'alias' => target['alias'],
# groups come from group_data
'groups' => []
)
@resolved_targets[target_name] = resolved_data
else
@resolved_targets[target_name]
end
end
def all_target_names
@unresolved_targets.keys + @resolved_targets.keys
end
def add_target_definition(target)
# This check ensures target lookup plugins do not return bare strings.
# Remove it if we decide to allow task plugins to return string Target
# names.
unless target.is_a?(Hash)
raise ValidationError.new("Target entry must be a Hash, not #{target.class}", @name)
end
target['name'] = @plugins.resolve_references(target['name']) if target.key?('name')
target['uri'] = @plugins.resolve_references(target['uri']) if target.key?('uri')
target['alias'] = @plugins.resolve_references(target['alias']) if target.key?('alias')
t_name = target['name'] || target['uri']
if t_name.nil? || t_name.empty?
raise ValidationError.new("No name or uri for target: #{target}", @name)
end
unless t_name.is_a? String
raise ValidationError.new("Target name must be a String, not #{t_name.class}", @name)
end
unless t_name.ascii_only?
raise ValidationError.new("Target name must be ASCII characters: #{target}", @name)
end
if local_targets.include?(t_name)
@logger.warn("Ignoring duplicate target in #{@name}: #{target}")
return
end
unless (unexpected_keys = target.keys - TARGET_KEYS).empty?
msg = "Found unexpected key(s) #{unexpected_keys.join(', ')} in target #{t_name}"
@logger.warn(msg)
end
validate_data_keys(target, t_name)
if target.include?('alias')
aliases = target['alias']
aliases = [aliases] if aliases.is_a?(String)
unless aliases.is_a?(Array)
msg = "Alias entry on #{t_name} must be a String or Array, not #{aliases.class}"
raise ValidationError.new(msg, @name)
end
insert_alia(t_name, aliases)
end
@unresolved_targets[t_name] = target
end
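# Hedged example (values are hypothetical) of a target hash this method
# accepts; plugin references in 'name'/'uri'/'alias' are resolved first:
#   add_target_definition(
#     'uri' => 'ssh://web1.example.com',
#     'alias' => ['web1'],
#     'config' => { 'transport' => 'ssh' }
#   )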
def add_target(target)
@resolved_targets[target.name] = { 'name' => target.name }
end
def insert_alia(target_name, aliases)
aliases.each do |alia|
raise ValidationError.new("Invalid alias #{alia}", @name) unless alia =~ NAME_REGEX
if (found = @aliases[alia])
raise ValidationError.new(alias_conflict(alia, found, target_name), @name)
end
@aliases[alia] = target_name
end
end
def clear_alia(target_name)
@aliases.reject! { |_alias, name| name == target_name }
end
def data_merge(data1, data2)
if data2.nil? || data1.nil?
return data2 || data1
end
{
'config' => Bolt::Util.deep_merge(data1['config'], data2['config']),
'name' => data1['name'] || data2['name'],
'uri' => data1['uri'] || data2['uri'],
# Collect all aliases across all groups for each target uri
'alias' => [*data1['alias'], *data2['alias']],
# Shallow merge instead of deep merge so that vars with a hash value
# are assigned a new hash, rather than merging the existing value
# with the value meant to replace it
'vars' => data1['vars'].merge(data2['vars']),
'facts' => Bolt::Util.deep_merge(data1['facts'], data2['facts']),
'features' => data1['features'] | data2['features'],
'plugin_hooks' => data1['plugin_hooks'].merge(data2['plugin_hooks']),
'groups' => data2['groups'] + data1['groups']
}
end
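# Sketch of the precedence (inferred from the implementation above; values
# are hypothetical and the other required keys are elided): 'vars'
# shallow-merge with data2 winning on conflicts, while 'name'/'uri' keep
# data1 when present and 'groups' concatenate:
#   data_merge({ 'vars' => { 'a' => 1 } }, { 'vars' => { 'a' => 2 } })
#   #   merged 'vars' => { 'a' => 2 }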
def resolve_string_targets(aliases, known_targets)
@string_targets.each do |string_target|
# If this is the name of a target defined elsewhere, then insert the
# target into this group as just a name. Otherwise, add a new target
# with the string as the URI.
if known_targets.include?(string_target)
@unresolved_targets[string_target] = { 'name' => string_target }
# If this is an alias for an existing target, then add it to this group
elsif (canonical_name = aliases[string_target])
if local_targets.include?(canonical_name)
@logger.warn("Ignoring duplicate target in #{@name}: #{canonical_name}")
else
@unresolved_targets[canonical_name] = { 'name' => canonical_name }
end
# If it's not the name or alias of an existing target, then make a
# new target using the string as the URI
elsif local_targets.include?(string_target)
@logger.warn("Ignoring duplicate target in #{@name}: #{string_target}")
else
@unresolved_targets[string_target] = { 'uri' => string_target }
end
end
@groups.each { |g| g.resolve_string_targets(aliases, known_targets) }
end
private def alias_conflict(name, target1, target2)
"Alias #{name} refers to multiple targets: #{target1} and #{target2}"
end
private def group_alias_conflict(name)
"Group #{name} conflicts with alias of the same name"
end
private def group_target_conflict(name)
"Group #{name} conflicts with target of the same name"
end
private def alias_target_conflict(name)
"Target name #{name} conflicts with alias of the same name"
end
def validate_group_input(input)
raise ValidationError.new("Expected group to be a Hash, not #{input.class}", nil) unless input.is_a?(Hash)
# DEPRECATION : remove this before finalization
if input.key?('target-lookups')
msg = "'target-lookups' are no longer a separate key. Merge 'target-lookups' and 'targets' lists and replace 'plugin' with '_plugin'" # rubocop:disable Metrics/LineLength
raise ValidationError.new(msg, @name)
end
unless (unexpected_keys = input.keys - GROUP_KEYS).empty?
msg = "Found unexpected key(s) #{unexpected_keys.join(', ')} in group #{@name}"
@logger.warn(msg)
end
Bolt::Util.walk_keys(input) do |key|
if @plugins.reference?(key)
raise ValidationError.new("Group keys cannot be specified as _plugin references", @name)
else
key
end
end
end
def validate(used_group_names = Set.new, used_target_names = Set.new, used_aliases = {})
# Test if this group name conflicts with anything used before.
raise ValidationError.new("Tried to redefine group #{@name}", @name) if used_group_names.include?(@name)
raise ValidationError.new(group_target_conflict(@name), @name) if used_target_names.include?(@name)
raise ValidationError.new(group_alias_conflict(@name), @name) if used_aliases.include?(@name)
used_group_names << @name
# Collect target names and aliases into a list used to validate that subgroups don't conflict.
# Used names validate that previously used group names don't conflict with new target names/aliases.
@unresolved_targets.merge(@resolved_targets).each do |t_name, t_data|
# Require targets to be parseable as a Target.
begin
# Catch malformed URI here
Bolt::Inventory::Target.parse_uri(t_data['uri'])
rescue Bolt::ParseError => e
@logger.debug(e)
raise ValidationError.new("Invalid target uri #{t_data['uri']}", @name)
end
raise ValidationError.new(group_target_conflict(t_name), @name) if used_group_names.include?(t_name)
if used_aliases.include?(t_name)
raise ValidationError.new(alias_target_conflict(t_name), @name)
end
used_target_names << t_name
end
@aliases.each do |n, target|
raise ValidationError.new(group_alias_conflict(n), @name) if used_group_names.include?(n)
if used_target_names.include?(n)
raise ValidationError.new(alias_target_conflict(n), @name)
end
if used_aliases.include?(n)
raise ValidationError.new(alias_conflict(n, target, used_aliases[n]), @name)
end
used_aliases[n] = target
end
@groups.each do |g|
begin
g.validate(used_group_names, used_target_names, used_aliases)
rescue ValidationError => e
e.add_parent(@name)
raise e
end
end
nil
end
def resolve_data_keys(data, target = nil)
result = {
'config' => @plugins.resolve_references(data.fetch('config', {})),
'vars' => @plugins.resolve_references(data.fetch('vars', {})),
'facts' => @plugins.resolve_references(data.fetch('facts', {})),
'features' => @plugins.resolve_references(data.fetch('features', [])),
'plugin_hooks' => @plugins.resolve_references(data.fetch('plugin_hooks', {}))
}
validate_data_keys(result, target)
result['features'] = Set.new(result['features'].flatten)
result
end
def validate_data_keys(data, target = nil)
{
'config' => Hash,
'vars' => Hash,
'facts' => Hash,
'features' => Array,
'plugin_hooks' => Hash
}.each do |key, expected_type|
next if !data.key?(key) || data[key].is_a?(expected_type) || @plugins.reference?(data[key])
msg = +"Expected #{key} to be of type #{expected_type}, not #{data[key].class}"
msg << " for target #{target}" if target
raise ValidationError.new(msg, @name)
end
unless @plugins.reference?(data['config'])
unexpected_keys = data.fetch('config', {}).keys - CONFIG_KEYS
if unexpected_keys.any?
msg = +"Found unexpected key(s) #{unexpected_keys.join(', ')} in config for"
msg << " target #{target} in" if target
msg << " group #{@name}"
@logger.warn(msg)
end
end
end
def group_data
@group_data ||= resolve_data_keys(@input).merge('groups' => [@name])
end
# Returns targets contained directly within the group, ignoring subgroups
def local_targets
Set.new(@unresolved_targets.keys) + Set.new(@resolved_targets.keys)
end
# Returns all targets contained within the group, which includes targets from subgroups.
def all_targets
@groups.inject(local_targets) do |acc, g|
acc.merge(g.all_targets)
end
end
# Returns a mapping of aliases to targets contained within the group, which includes subgroups.
def target_aliases
@groups.inject(@aliases) do |acc, g|
acc.merge(g.target_aliases)
end
end
# Return a mapping of group names to group.
def collect_groups
@groups.inject(name => self) do |acc, g|
acc.merge(g.collect_groups)
end
end
def target_collect(target_name)
child_data = @groups.map { |group| group.target_collect(target_name) }
# Data from earlier groups wins
child_result = child_data.inject do |acc, group_data|
data_merge(group_data, acc)
end
# Children override the parent
data_merge(target_data(target_name), child_result)
end
def group_collect(target_name)
child_data = @groups.map { |group| group.group_collect(target_name) }
# Data from earlier groups wins
child_result = child_data.inject do |acc, group_data|
data_merge(group_data, acc)
end
# If this group has the target or one of the child groups has the
# target, return the data, otherwise return nil
if child_result || local_targets.include?(target_name)
# Children override the parent
data_merge(group_data, child_result)
end
end
end
end
end
| 1 | 13,373 | This technically works because we call `get_targets` before we remove the target, so it's bound to have been resolved. But I'm not sure I would like to rely on that assumption. Should we also remove it from `@unresolved_targets` to be safe? | puppetlabs-bolt | rb |
@@ -200,7 +200,7 @@ public abstract class AbstractCallOperation extends AbstractOperation {
.sender(sender(frame))
.value(value(frame))
.apparentValue(apparentValue(frame))
- .code(new Code(contract != null ? contract.getCode() : Bytes.EMPTY))
+ .code(frame.getWorldState().getContract(contract).orElse(new Code()))
.blockHeader(frame.getBlockHeader())
.depth(frame.getMessageStackDepth() + 1)
.isStatic(isStatic(frame)) | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.vm;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.Gas;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
/**
* A skeleton class for implementing call operations.
*
* <p>A call operation creates a child message call from the current message context, allows it to
* execute, and then updates the current message context based on its execution.
*/
public abstract class AbstractCallOperation extends AbstractOperation {
protected static final OperationResult UNDERFLOW_RESPONSE =
new OperationResult(
Optional.empty(), Optional.of(ExceptionalHaltReason.INSUFFICIENT_STACK_ITEMS));
protected AbstractCallOperation(
final int opcode,
final String name,
final int stackItemsConsumed,
final int stackItemsProduced,
final boolean updatesProgramCounter,
final int opSize,
final GasCalculator gasCalculator) {
super(
opcode,
name,
stackItemsConsumed,
stackItemsProduced,
updatesProgramCounter,
opSize,
gasCalculator);
}
/**
* Returns the additional gas to provide the call operation.
*
* @param frame The current message frame
* @return the additional gas to provide the call operation
*/
protected abstract Gas gas(MessageFrame frame);
/**
* Returns the account the call is being made to.
*
* @param frame The current message frame
* @return the account the call is being made to
*/
protected abstract Address to(MessageFrame frame);
/**
* Returns the value being transferred in the call
*
* @param frame The current message frame
* @return the value being transferred in the call
*/
protected abstract Wei value(MessageFrame frame);
/**
* Returns the apparent value being transferred in the call
*
* @param frame The current message frame
* @return the apparent value being transferred in the call
*/
protected abstract Wei apparentValue(MessageFrame frame);
/**
* Returns the memory offset the input data starts at.
*
* @param frame The current message frame
* @return the memory offset the input data starts at
*/
protected abstract UInt256 inputDataOffset(MessageFrame frame);
/**
* Returns the length of the input data to read from memory.
*
* @param frame The current message frame
* @return the length of the input data to read from memory.
*/
protected abstract UInt256 inputDataLength(MessageFrame frame);
/**
* Returns the memory offset the offset data starts at.
*
* @param frame The current message frame
* @return the memory offset the offset data starts at
*/
protected abstract UInt256 outputDataOffset(MessageFrame frame);
/**
* Returns the length of the output data to read from memory.
*
* @param frame The current message frame
* @return the length of the output data to read from memory.
*/
protected abstract UInt256 outputDataLength(MessageFrame frame);
/**
* Returns the account address the call operation is being performed on
*
* @param frame The current message frame
* @return the account address the call operation is being performed on
*/
protected abstract Address address(MessageFrame frame);
/**
* Returns the account address the call operation is being sent from
*
* @param frame The current message frame
* @return the account address the call operation is being sent from
*/
protected abstract Address sender(MessageFrame frame);
/**
* Returns the gas available to execute the child message call.
*
* @param frame The current message frame
* @return the gas available to execute the child message call
*/
protected abstract Gas gasAvailableForChildCall(MessageFrame frame);
/**
* Returns whether or not the child message call should be static.
*
* @param frame The current message frame
* @return {@code true} if the child message call should be static; otherwise {@code false}
*/
protected abstract boolean isStatic(MessageFrame frame);
@Override
public OperationResult execute(final MessageFrame frame, final EVM evm) {
// manual check because some reads won't come until the "complete" step.
if (frame.stackSize() < getStackItemsConsumed()) {
return UNDERFLOW_RESPONSE;
}
final Gas cost = cost(frame);
final Optional<Gas> optionalCost = Optional.ofNullable(cost);
if (cost != null) {
if (frame.getRemainingGas().compareTo(cost) < 0) {
return new OperationResult(
optionalCost, Optional.of(ExceptionalHaltReason.INSUFFICIENT_GAS));
}
frame.decrementRemainingGas(cost);
frame.clearReturnData();
final Address to = to(frame);
final Account contract = frame.getWorldState().get(to);
final Account account = frame.getWorldState().get(frame.getRecipientAddress());
final Wei balance = account.getBalance();
if (value(frame).compareTo(balance) > 0 || frame.getMessageStackDepth() >= 1024) {
frame.expandMemory(inputDataOffset(frame), inputDataLength(frame));
frame.expandMemory(outputDataOffset(frame), outputDataLength(frame));
frame.incrementRemainingGas(gasAvailableForChildCall(frame).plus(cost));
frame.popStackItems(getStackItemsConsumed());
frame.pushStackItem(UInt256.ZERO);
return new OperationResult(optionalCost, Optional.empty());
}
final Bytes inputData =
frame.readMutableMemory(inputDataOffset(frame), inputDataLength(frame));
final MessageFrame childFrame =
MessageFrame.builder()
.type(MessageFrame.Type.MESSAGE_CALL)
.messageFrameStack(frame.getMessageFrameStack())
.blockchain(frame.getBlockchain())
.worldState(frame.getWorldState().updater())
.initialGas(gasAvailableForChildCall(frame))
.address(address(frame))
.originator(frame.getOriginatorAddress())
.contract(to)
.gasPrice(frame.getGasPrice())
.inputData(inputData)
.sender(sender(frame))
.value(value(frame))
.apparentValue(apparentValue(frame))
.code(new Code(contract != null ? contract.getCode() : Bytes.EMPTY))
.blockHeader(frame.getBlockHeader())
.depth(frame.getMessageStackDepth() + 1)
.isStatic(isStatic(frame))
.completer(child -> complete(frame, child))
.miningBeneficiary(frame.getMiningBeneficiary())
.blockHashLookup(frame.getBlockHashLookup())
.maxStackSize(frame.getMaxStackSize())
.build();
frame.incrementRemainingGas(cost);
childFrame.copyWarmedUpFields(frame);
frame.getMessageFrameStack().addFirst(childFrame);
frame.setState(MessageFrame.State.CODE_SUSPENDED);
}
return new OperationResult(optionalCost, Optional.empty());
}
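// Rough control-flow sketch (a summary, not original source): execute()
// charges cost(frame), short-circuits by pushing 0 when the balance is
// insufficient or the call depth reaches 1024, otherwise builds a child
// MessageFrame and suspends this frame; complete() later writes the
// child's output back and pushes 1 on success or 0 on failure.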
protected abstract Gas cost(final MessageFrame frame);
public void complete(final MessageFrame frame, final MessageFrame childFrame) {
frame.setState(MessageFrame.State.CODE_EXECUTING);
final UInt256 outputOffset = outputDataOffset(frame);
final UInt256 outputSize = outputDataLength(frame);
final Bytes outputData = childFrame.getOutputData();
final int outputSizeAsInt = outputSize.intValue();
if (outputSizeAsInt > outputData.size()) {
frame.expandMemory(outputOffset, outputSize);
frame.writeMemory(outputOffset, UInt256.valueOf(outputData.size()), outputData, true);
} else {
frame.writeMemory(outputOffset, outputSize, outputData, true);
}
frame.setReturnData(outputData);
frame.addLogs(childFrame.getLogs());
frame.addSelfDestructs(childFrame.getSelfDestructs());
frame.incrementGasRefund(childFrame.getGasRefund());
final Gas gasRemaining = childFrame.getRemainingGas();
frame.incrementRemainingGas(gasRemaining);
frame.popStackItems(getStackItemsConsumed());
if (childFrame.getState() == MessageFrame.State.COMPLETED_SUCCESS) {
frame.mergeWarmedUpFields(childFrame);
frame.pushStackItem(UInt256.ONE);
} else {
frame.pushStackItem(UInt256.ZERO);
}
final int currentPC = frame.getPC();
frame.setPC(currentPC + 1);
}
}
| 1 | 25,737 | So what happens if I try to get code on a EOA? | hyperledger-besu | java |
@@ -2400,15 +2400,8 @@ class VariablesChecker(BaseChecker):
return
# Attempt to check unpacking is properly balanced
- values: Optional[List] = None
- if isinstance(inferred, (nodes.Tuple, nodes.List)):
- values = inferred.itered()
- elif isinstance(inferred, astroid.Instance) and any(
- ancestor.qname() == "typing.NamedTuple" for ancestor in inferred.ancestors()
- ):
- values = [i for i in inferred.values() if isinstance(i, nodes.AssignName)]
-
- if values:
+ values = self._get_values_to_unpack(inferred)
+ if values is not None:
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, nodes.Starred) for target in targets): | 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Grant Welch <gwelch925+github@gmail.com>
# Copyright (c) 2017-2018, 2021 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2017-2018, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Dan Garrette <dhgarrette@gmail.com>
# Copyright (c) 2018-2019 Jim Robertson <jrobertson98atx@gmail.com>
# Copyright (c) 2018 Mike Miller <mtmiller@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 Drew <drewrisinger@users.noreply.github.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Marianna Polatoglou <mpolatoglou@bloomberg.net>
# Copyright (c) 2018 mar-chi-pan <mar.polatoglou@gmail.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019, 2021 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 Andrew Simmons <anjsimmo@gmail.com>
# Copyright (c) 2020 Andrew Simmons <a.simmons@deakin.edu.au>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 Ashley Whetter <ashleyw@activestate.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Tushar Sadhwani <tushar.sadhwani000@gmail.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 bot <bot@noreply.github.com>
# Copyright (c) 2021 David Liu <david@cs.toronto.edu>
# Copyright (c) 2021 kasium <15907922+kasium@users.noreply.github.com>
# Copyright (c) 2021 Marcin Kurczewski <rr-@sakuya.pl>
# Copyright (c) 2021 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2021 Lorena B <46202743+lorena-b@users.noreply.github.com>
# Copyright (c) 2021 haasea <44787650+haasea@users.noreply.github.com>
# Copyright (c) 2021 Alexander Kapshuna <kapsh@kap.sh>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Variables checkers for Python code"""
import collections
import copy
import itertools
import os
import re
import sys
from enum import Enum
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Set,
Tuple,
Union,
)
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.constants import PY39_PLUS
from pylint.interfaces import HIGH, INFERENCE, INFERENCE_FAILURE, IAstroidChecker
from pylint.utils import get_global_option
if TYPE_CHECKING:
from pylint.lint import PyLinter
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
TYPING_NAMES = frozenset(
{
"Any",
"Callable",
"ClassVar",
"Generic",
"Optional",
"Tuple",
"Type",
"TypeVar",
"Union",
"AbstractSet",
"ByteString",
"Container",
"ContextManager",
"Hashable",
"ItemsView",
"Iterable",
"Iterator",
"KeysView",
"Mapping",
"MappingView",
"MutableMapping",
"MutableSequence",
"MutableSet",
"Sequence",
"Sized",
"ValuesView",
"Awaitable",
"AsyncIterator",
"AsyncIterable",
"Coroutine",
"Collection",
"AsyncGenerator",
"AsyncContextManager",
"Reversible",
"SupportsAbs",
"SupportsBytes",
"SupportsComplex",
"SupportsFloat",
"SupportsInt",
"SupportsRound",
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple",
"Generator",
"AnyStr",
"Text",
"Pattern",
"BinaryIO",
}
)
class VariableVisitConsumerAction(Enum):
"""Used after _visit_consumer to determine the action to be taken
Continue -> continue loop to next consumer
Return -> return and thereby break the loop
Consume -> consume the found nodes (second return value) and return
"""
CONTINUE = 0
RETURN = 1
CONSUME = 2
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if isinstance(local_node, nodes.ImportFrom) and local_node.modname == FUTURE:
return True
return None
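# Illustrative sketch (hypothetical module names): if ``mod.py`` contains
#     from __future__ import annotations
# then for a statement ``from mod import annotations`` in another module,
# _is_from_future_import(stmt, "annotations") returns True.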
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return isinstance(parent, nodes.For) and any(
else_stmt.parent_of(stmt) or else_stmt == stmt for else_stmt in parent.orelse
)
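# Illustrative sketch (``items`` and ``total`` are placeholder names):
#     for item in items:
#         ...
#     else:
#         total = 0   # in_for_else_branch(<For>, <Assign total>) -> True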
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, nodes.FunctionDef):
return meth_node
return None
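# Illustrative sketch (hypothetical classes): given
#     class Base:
#         def run(self): ...
#     class Child(Base):
#         def run(self): ...
# overridden_method(<ClassDef Child>, "run") returns Base.run's FunctionDef.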
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = f" {inferred.as_string()}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
"""Detect that the given frames shares a global
scope.
Two frames shares a global scope when neither
of them are hidden under a function scope, as well
as any of parent scope of them, until the root scope.
In this case, depending from something defined later on
will not work, because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, nodes.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent, (nodes.FunctionDef, nodes.Arguments)):
return False
elif any(
not isinstance(f, (nodes.ClassDef, nodes.Module)) for f in (frame, defframe)
):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for current_scope in (scope, def_scope):
        # Look for parent scopes. If there is anything other
        # than a module or a class scope, then the frames don't
        # share a global scope.
parent_scope = current_scope
while parent_scope:
if not isinstance(parent_scope, (nodes.ClassDef, nodes.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
    # At this point, we are certain that frame and defframe share a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
context = astroid.context.InferenceContext()
context.lookupname = name
return node.infer(context, asname=False)
def _fix_dot_imports(not_consumed):
"""Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
names = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.assign_type(), nodes.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (nodes.ImportFrom, nodes.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
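# Illustrative sketch: with both ``import xml.etree`` and ``import xml.sax``
# present, .locals holds the single root name "xml"; _fix_dot_imports expands
# it back so each dotted import ("xml.etree", "xml.sax") maps to its own
# Import statement and can be reported individually if unused.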
def _find_frame_imports(name, frame):
"""Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((nodes.Import, nodes.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
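# Illustrative sketch: for two statements ``global a, b`` and ``global b, c``,
# _flattened_scope_names(...) yields the set {"a", "b", "c"}.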
def _assigned_locally(name_node):
"""Checks if name_node has corresponding assign statement in same scope"""
assign_stmts = name_node.scope().nodes_of_class(nodes.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
def _is_type_checking_import(node: Union[nodes.Import, nodes.ImportFrom]) -> bool:
"""Check if an import node is guarded by a TYPE_CHECKS guard"""
return any(
isinstance(ancestor, nodes.If)
and ancestor.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for ancestor in node.node_ancestors()
)
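# Illustrative sketch (``heavy_module`` is a placeholder name):
#     from typing import TYPE_CHECKING
#     if TYPE_CHECKING:
#         import heavy_module   # _is_type_checking_import(...) -> True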
def _has_locals_call_after_node(stmt, scope):
skip_nodes = (
nodes.FunctionDef,
nodes.ClassDef,
nodes.Import,
nodes.ImportFrom,
)
for call in scope.nodes_of_class(nodes.Call, skip_klass=skip_nodes):
inferred = utils.safe_infer(call.func)
if (
utils.is_builtin_object(inferred)
and getattr(inferred, "name", None) == "locals"
):
if stmt.lineno < call.lineno:
return True
return False
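# Illustrative sketch (``compute`` is a placeholder name):
#     def func():
#         value = compute()   # stmt
#         return locals()     # locals() call on a later line -> True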
MSGS = {
"E0601": (
"Using variable %r before assignment",
"used-before-assignment",
"Emitted when a local variable is accessed before its assignment took place. "
"Assignments in try blocks are assumed not to have occurred when evaluating "
"associated except/finally blocks. Assignments in except blocks are assumed "
"not to have occurred when evaluating statements outside the block, except "
"when the associated try block contains a return statement.",
),
"E0602": (
"Undefined variable %r",
"undefined-variable",
"Used when an undefined variable is accessed.",
),
"E0603": (
"Undefined variable name %r in __all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0605": (
"Invalid format for __all__, must be tuple or list",
"invalid-all-format",
"Used when __all__ has an invalid format.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
'Used when a variable is defined through the "global" statement '
"but no assignment to this variable is done.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
'Used when you use the "global" statement to update a global '
"variable. Pylint just try to discourage this "
"usage. That doesn't mean you cannot use it !",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import(s) %s from wildcard import of %s",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in the outer scope.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with "
"sequence%s: "
"left side has %d label(s), right side has %d value(s)",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not "
"a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
}
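# Illustrative sketch of the E0601 semantics described above
# (``risky`` is a placeholder name):
#     try:
#         risky()
#         name = 1
#     except KeyError:
#         print(name)   # used-before-assignment
# The assignment inside the try block is assumed not to have occurred when
# the except block is evaluated.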
class ScopeConsumer(NamedTuple):
"""Store nodes and their consumption states."""
to_consume: Dict[str, List[nodes.NodeNG]]
consumed: Dict[str, List[nodes.NodeNG]]
consumed_uncertain: DefaultDict[str, List[nodes.NodeNG]]
scope_type: str
class NamesConsumer:
"""A simple class to handle consumed, to consume and scope type info of node locals"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(
copy.copy(node.locals), {}, collections.defaultdict(list), scope_type
)
self.node = node
def __repr__(self):
to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
consumed_uncertain = [
f"{k}->{v}" for k, v in self._atomic.consumed_uncertain.items()
]
to_consumes = ", ".join(to_consumes)
consumed = ", ".join(consumed)
consumed_uncertain = ", ".join(consumed_uncertain)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
consumed_uncertain: {consumed_uncertain}
scope_type : {self._atomic.scope_type}
"""
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def consumed_uncertain(self) -> DefaultDict[str, List[nodes.NodeNG]]:
"""Retrieves nodes filtered out by get_next_to_consume() that may not
have executed, such as statements in except blocks, or statements
in try blocks (when evaluating their corresponding except and finally
blocks). Checkers that want to treat the statements as executed
(e.g. for unused-variable) may need to add them back.
"""
return self._atomic.consumed_uncertain
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, consumed_nodes):
"""Mark the given nodes as consumed for the name.
If all of the nodes for the name were consumed, delete the name from
the to_consume dictionary
"""
unconsumed = [n for n in self.to_consume[name] if n not in set(consumed_nodes)]
self.consumed[name] = consumed_nodes
if unconsumed:
self.to_consume[name] = unconsumed
else:
del self.to_consume[name]
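    # Illustrative sketch: if to_consume["x"] == [n1, n2], then after
    # mark_as_consumed("x", [n1]) we have consumed["x"] == [n1] and
    # to_consume["x"] == [n2]; once no unconsumed nodes remain, "x" is
    # removed from to_consume entirely.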
def get_next_to_consume(self, node: nodes.Name) -> Optional[List[nodes.NodeNG]]:
"""Return a list of the nodes that define `node` from this scope. If it is
uncertain whether a node will be consumed, such as for statements in
except blocks, add it to self.consumed_uncertain instead of returning it.
Return None to indicate a special case that needs to be handled by the caller.
"""
name = node.name
parent_node = node.parent
found_nodes = self.to_consume.get(name)
node_statement = node.statement(future=True)
if (
found_nodes
and isinstance(parent_node, nodes.Assign)
and parent_node == found_nodes[0].parent
):
lhs = found_nodes[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_nodes = None
if (
found_nodes
and isinstance(parent_node, nodes.For)
and parent_node.iter == node
and parent_node.target in found_nodes
):
found_nodes = None
# Filter out assignments in ExceptHandlers that node is not contained in
if found_nodes:
found_nodes = [
n
for n in found_nodes
if not isinstance(n.statement(future=True), nodes.ExceptHandler)
or n.statement(future=True).parent_of(node)
]
# Filter out assignments in an Except clause that the node is not
# contained in, assuming they may fail
if found_nodes:
uncertain_nodes = self._uncertain_nodes_in_except_blocks(
found_nodes, node, node_statement
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in a Finally block of a Try/Finally,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in an ExceptHandler,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
return found_nodes
@staticmethod
def _uncertain_nodes_in_except_blocks(
found_nodes: List[nodes.NodeNG],
node: nodes.NodeNG,
node_statement: nodes.Statement,
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in an except block.
"""
uncertain_nodes = []
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# Only testing for statements in the except block of TryExcept
if not (
isinstance(other_node_statement.parent, nodes.ExceptHandler)
and isinstance(other_node_statement.parent.parent, nodes.TryExcept)
):
continue
# If the other node is in the same scope as this node, assume it executes
if other_node_statement.parent.parent_of(node):
continue
try_block_returns = any(
isinstance(try_statement, nodes.Return)
for try_statement in other_node_statement.parent.parent.body
)
# If the try block returns, assume the except blocks execute.
if try_block_returns:
# Exception: if this node is in the final block of the other_node_statement,
# it will execute before returning. Assume the except statements are uncertain.
if (
isinstance(node_statement.parent, nodes.TryFinally)
and node_statement in node_statement.parent.finalbody
# We have already tested that other_node_statement has two parents
# and it was TryExcept, so getting one more parent is safe.
and other_node_statement.parent.parent.parent.parent_of(
node_statement
)
):
uncertain_nodes.append(other_node)
else:
# Assume the except blocks execute. Possibility for a false negative
# if one of the except blocks does not define the name in question,
# raise, or return. See: https://github.com/PyCQA/pylint/issues/5524.
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
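    # Illustrative sketch for the "try block returns" branch above
    # (``compute`` and ``fallback`` are placeholder names):
    #     try:
    #         return compute()
    #     except ValueError:
    #         result = fallback()
    # The except-block assignment to ``result`` is assumed to execute (the
    # try block returns), so it is not reported as uncertain.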
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in a try block and the ``node_statement`` being evaluated
is in one of its except handlers.
"""
uncertain_nodes: List[nodes.NodeNG] = []
closest_except_handler = utils.get_node_first_ancestor_of_type(
node_statement, nodes.ExceptHandler
)
if closest_except_handler is None:
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# If the other statement is the except handler guarding `node`, it executes
if other_node_statement is closest_except_handler:
continue
# Ensure other_node is in a try block
(
other_node_try_ancestor,
other_node_try_ancestor_visited_child,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryExcept
)
if other_node_try_ancestor is None:
continue
if (
other_node_try_ancestor_visited_child
not in other_node_try_ancestor.body
):
continue
# Make sure nesting is correct -- there should be at least one
# except handler that is a sibling attached to the try ancestor,
# or is an ancestor of the try ancestor.
if not any(
closest_except_handler in other_node_try_ancestor.handlers
or other_node_try_ancestor_except_handler
in closest_except_handler.node_ancestors()
for other_node_try_ancestor_except_handler in other_node_try_ancestor.handlers
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
uncertain_nodes: List[nodes.NodeNG] = []
(
closest_try_finally_ancestor,
child_of_closest_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
node_statement, nodes.TryFinally
)
if closest_try_finally_ancestor is None:
return uncertain_nodes
if (
child_of_closest_try_finally_ancestor
not in closest_try_finally_ancestor.finalbody
):
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
(
other_node_try_finally_ancestor,
child_of_other_node_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryFinally
)
if other_node_try_finally_ancestor is None:
continue
# other_node needs to descend from the try of a try/finally.
if (
child_of_other_node_try_finally_ancestor
not in other_node_try_finally_ancestor.body
):
continue
# If the two try/finally ancestors are not the same, then
# node_statement's closest try/finally ancestor needs to be in
# the final body of other_node's try/finally ancestor, or
# descend from one of the statements in that final body.
if (
other_node_try_finally_ancestor is not closest_try_finally_ancestor
and not any(
other_node_final_statement is closest_try_finally_ancestor
or other_node_final_statement.parent_of(
closest_try_finally_ancestor
)
for other_node_final_statement in other_node_try_finally_ancestor.finalbody
)
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
__implements__ = IAstroidChecker
name = "variables"
msgs = MSGS
priority = -1
options = (
(
"init-import",
{
"default": 0,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be "
"ignored. Default to name with leading underscore.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
(
"allowed-redefined-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of names allowed to shadow builtins",
},
),
)
def __init__(self, linter=None):
super().__init__(linter)
self._to_consume: List[NamesConsumer] = []
self._checking_mod_attr = None
self._loop_variables = []
self._type_annotation_names = []
self._except_handler_names_queue: List[
Tuple[nodes.ExceptHandler, nodes.AssignName]
] = []
"""This is a queue, last in first out"""
self._postponed_evaluation_enabled = False
def open(self) -> None:
"""Called when loading the checker"""
self._is_undefined_variable_enabled = self.linter.is_message_enabled(
"undefined-variable"
)
self._is_used_before_assignment_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
self._is_undefined_loop_variable_enabled = self.linter.is_message_enabled(
"undefined-loop-variable"
)
@utils.check_messages("redefined-outer-name")
def visit_for(self, node: nodes.For) -> None:
assigned_to = [a.name for a in node.target.nodes_of_class(nodes.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages("redefined-outer-name")
def leave_for(self, node: nodes.For) -> None:
self._loop_variables.pop()
self._store_type_annotation_names(node)
def visit_module(self, node: nodes.Module) -> None:
"""visit module : update consumption analysis variable
checks globals doesn't overrides builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.check_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"invalid-all-format",
"unused-variable",
)
def leave_module(self, node: nodes.Module) -> None:
"""leave module: check globals"""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""visit class: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "class"))
def leave_classdef(self, _: nodes.ClassDef) -> None:
"""leave class: update consumption analysis variable"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node: nodes.Lambda) -> None:
"""visit lambda: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "lambda"))
def leave_lambda(self, _: nodes.Lambda) -> None:
"""leave lambda: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node: nodes.GeneratorExp) -> None:
"""visit genexpr: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_generatorexp(self, _: nodes.GeneratorExp) -> None:
"""leave genexpr: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node: nodes.DictComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_dictcomp(self, _: nodes.DictComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node: nodes.SetComp) -> None:
"""visit setcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_setcomp(self, _: nodes.SetComp) -> None:
"""leave setcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""visit function: update consumption analysis variable and check locals"""
self._to_consume.append(NamesConsumer(node, "function"))
if not (
self.linter.is_message_enabled("redefined-outer-name")
or self.linter.is_message_enabled("redefined-builtin")
):
return
globs = node.root().globals
for name, stmt in node.items():
if name in globs and not isinstance(stmt, nodes.Global):
definition = globs[name][0]
if (
isinstance(definition, nodes.ImportFrom)
and definition.modname == FUTURE
):
# It is a __future__ directive, not a symbol.
continue
                # Do not take into account names redefined
                # for the purpose of type checking:
if any(
isinstance(definition.parent, nodes.If)
and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for definition in globs[name]
):
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message(
"redefined-outer-name", args=(name, line), node=stmt
)
elif (
utils.is_builtin(name)
and not self._allowed_redefined_builtin(name)
and not self._should_ignore_redefined_builtin(stmt)
):
# do not print Redefining builtin for additional builtins
self.add_message("redefined-builtin", args=name, node=stmt)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""leave function: check function's locals are consumed"""
self._check_metaclasses(node)
if node.type_comment_returns:
self._store_type_annotation_node(node.type_comment_returns)
if node.type_comment_args:
for argument_annotation in node.type_comment_args:
self._store_type_annotation_node(argument_annotation)
not_consumed = self._to_consume.pop().to_consume
if not (
self.linter.is_message_enabled("unused-variable")
or self.linter.is_message_enabled("possibly-unused-variable")
or self.linter.is_message_enabled("unused-argument")
):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(nodes.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(nodes.Nonlocal))
for name, stmts in not_consumed.items():
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages(
"global-variable-undefined",
"global-variable-not-assigned",
"global-statement",
"global-at-module-level",
"redefined-builtin",
)
def visit_global(self, node: nodes.Global) -> None:
"""check names imported exists in the global scope"""
frame = node.frame(future=True)
if isinstance(frame, nodes.Module):
self.add_message("global-at-module-level", node=node)
return
module = frame.root()
default_message = True
locals_ = node.scope().locals
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
not_defined_locally_by_import = not any(
isinstance(local, nodes.Import) for local in locals_.get(name, ())
)
if (
not utils.is_reassigned_after_current(node, name)
and not utils.is_deleted_after_current(node, name)
and not_defined_locally_by_import
):
self.add_message("global-variable-not-assigned", args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (
isinstance(anode, nodes.AssignName)
and anode.name in module.special_attributes
):
self.add_message("redefined-builtin", args=name, node=node)
break
if anode.frame(future=True) is module:
# module level assignment
break
if (
isinstance(anode, (nodes.ClassDef, nodes.FunctionDef))
and anode.parent is module
):
# module level function assignment
break
else:
if not_defined_locally_by_import:
# global undefined at the module scope
self.add_message("global-variable-undefined", args=name, node=node)
default_message = False
if default_message:
self.add_message("global-statement", node=node)
def visit_assignname(self, node: nodes.AssignName) -> None:
if isinstance(node.assign_type(), nodes.AugAssign):
self.visit_name(node)
def visit_delname(self, node: nodes.DelName) -> None:
self.visit_name(node)
def visit_name(self, node: nodes.Name) -> None:
"""Don't add the 'utils.check_messages' decorator here!
It's important that all 'Name' nodes are visited, otherwise the
'NamesConsumers' won't be correct.
"""
stmt = node.statement(future=True)
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
self._undefined_and_used_before_checker(node, stmt)
if self._is_undefined_loop_variable_enabled:
self._loopvar_name(node)
@utils.check_messages("redefined-outer-name")
def visit_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not node.name or not isinstance(node.name, nodes.AssignName):
return
for outer_except, outer_except_assign_name in self._except_handler_names_queue:
if node.name.name == outer_except_assign_name.name:
self.add_message(
"redefined-outer-name",
args=(outer_except_assign_name.name, outer_except.fromlineno),
node=node,
)
break
self._except_handler_names_queue.append((node, node.name))
@utils.check_messages("redefined-outer-name")
def leave_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not node.name or not isinstance(node.name, nodes.AssignName):
return
self._except_handler_names_queue.pop()
def _undefined_and_used_before_checker(
self, node: nodes.Name, stmt: nodes.NodeNG
) -> None:
frame = stmt.scope()
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# Certain nodes shouldn't be checked as they get checked another time
if self._should_node_be_skipped(node, current_consumer, i == start_index):
continue
action, found_nodes = self._check_consumer(
node, stmt, frame, current_consumer, i, base_scope_type
)
if action is VariableVisitConsumerAction.CONTINUE:
continue
if action is VariableVisitConsumerAction.CONSUME:
# Any nodes added to consumed_uncertain by get_next_to_consume()
# should be added back so that they are marked as used.
# They will have already had a chance to emit used-before-assignment.
# We check here instead of before every single return in _check_consumer()
found_nodes += current_consumer.consumed_uncertain[node.name] # type: ignore[operator]
current_consumer.mark_as_consumed(node.name, found_nodes)
if action in {
VariableVisitConsumerAction.RETURN,
VariableVisitConsumerAction.CONSUME,
}:
return
        # We have not found the name; if it isn't a builtin, that's an
        # undefined name!
if (
self._is_undefined_variable_enabled
and not (
node.name in nodes.Module.scope_attrs
or utils.is_builtin(node.name)
or node.name in self.config.additional_builtins
or (
node.name == "__class__"
and isinstance(frame, nodes.FunctionDef)
and frame.is_method()
)
)
and not utils.node_ignores_exception(node, NameError)
):
self.add_message("undefined-variable", args=node.name, node=node)
def _should_node_be_skipped(
self, node: nodes.Name, consumer: NamesConsumer, is_start_index: bool
) -> bool:
"""Tests a consumer and node for various conditions in which the node
shouldn't be checked for the undefined-variable and used-before-assignment checks.
"""
if consumer.scope_type == "class":
# The list of base classes in the class definition is not part
# of the class body.
            # If the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents accessing this scope instead of
            # the global one in function members when there are common
            # names.
if utils.is_ancestor_name(consumer.node, node) or (
not is_start_index and self._ignore_class_scope(node)
):
return True
# Ignore inner class scope for keywords in class definition
if isinstance(node.parent, nodes.Keyword) and isinstance(
node.parent.parent, nodes.ClassDef
):
return True
elif consumer.scope_type == "function" and self._defined_in_function_definition(
node, consumer.node
):
# If the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
return True
elif consumer.scope_type == "lambda" and utils.is_default_argument(
node, consumer.node
):
return True
return False
# pylint: disable=too-many-return-statements
def _check_consumer(
self,
node: nodes.Name,
stmt: nodes.NodeNG,
frame: nodes.LocalsDictNodeNG,
current_consumer: NamesConsumer,
consumer_level: int,
base_scope_type: Any,
) -> Union[
Tuple[
Union[
Literal[VariableVisitConsumerAction.CONTINUE],
Literal[VariableVisitConsumerAction.RETURN],
],
None,
],
Tuple[Literal[VariableVisitConsumerAction.CONSUME], List[nodes.NodeNG]],
]:
"""Checks a consumer for conditions that should trigger messages"""
# If the name has already been consumed, only check it's not a loop
# variable used outside the loop.
# Avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if node.name in current_consumer.consumed:
if utils.is_func_decorator(current_consumer.node) or not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, consumer_level)
# But don't catch homonyms against the filter of a comprehension,
# (like "if x" in "[x for x in expr() if x]")
# https://github.com/PyCQA/pylint/issues/5586
and not (
isinstance(node.parent.parent, nodes.Comprehension)
and node.parent in node.parent.parent.ifs
)
):
self._check_late_binding_closure(node)
self._loopvar_name(node)
return (VariableVisitConsumerAction.RETURN, None)
found_nodes = current_consumer.get_next_to_consume(node)
if found_nodes is None:
return (VariableVisitConsumerAction.CONTINUE, None)
if not found_nodes:
self.add_message("used-before-assignment", args=node.name, node=node)
if current_consumer.consumed_uncertain[node.name]:
# If there are nodes added to consumed_uncertain by
# get_next_to_consume() because they might not have executed,
# return a CONSUME action so that _undefined_and_used_before_checker()
# will mark them as used
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.RETURN, None)
self._check_late_binding_closure(node)
if not (
self._is_undefined_variable_enabled
or self._is_used_before_assignment_enabled
):
return (VariableVisitConsumerAction.CONSUME, found_nodes)
defnode = utils.assign_parent(found_nodes[0])
defstmt = defnode.statement(future=True)
defframe = defstmt.frame(future=True)
# The class reuses itself in the class scope.
is_recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, nodes.ClassDef)
and node.name == defframe.name
)
if (
is_recursive_klass
and utils.get_node_first_ancestor_of_type(node, nodes.Lambda)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambda's --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
return (VariableVisitConsumerAction.RETURN, None)
(
maybe_before_assign,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
is_recursive_klass,
)
if use_outer_definition:
return (VariableVisitConsumerAction.CONTINUE, None)
if (
maybe_before_assign
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (nodes.DelName, nodes.AssignName)
)
if (
is_recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, nodes.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
nodes.AnnAssign,
nodes.FunctionDef,
nodes.Arguments,
),
)
and node.name in node.root().locals
):
self.add_message(
"undefined-variable", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type != "lambda":
            # E0601 may *not* occur in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(stmt, (nodes.AnnAssign, nodes.FunctionDef))
):
self.add_message(
"used-before-assignment", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
# We check lineno because doing the following is fine:
# class A:
# x = 42
# y = lambda attr: x + attr
if (
isinstance(frame, nodes.ClassDef)
and node.name in frame.locals
and stmt.fromlineno <= defstmt.fromlineno
):
self.add_message(
"used-before-assignment", args=node.name, node=node
)
elif self._is_only_type_assignment(node, defstmt):
self.add_message("undefined-variable", args=node.name, node=node)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif isinstance(defstmt, nodes.ClassDef):
is_first_level_ref = self._is_first_level_self_reference(node, defstmt)
if is_first_level_ref == 2:
self.add_message("used-before-assignment", node=node, args=node.name)
if is_first_level_ref:
return (VariableVisitConsumerAction.RETURN, None)
elif isinstance(defnode, nodes.NamedExpr):
if isinstance(defnode.parent, nodes.IfExp):
if self._is_never_evaluated(defnode, defnode.parent):
self.add_message("undefined-variable", args=node.name, node=node)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
@utils.check_messages("no-name-in-module")
def visit_import(self, node: nodes.Import) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, nodes.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages("no-name-in-module")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.check_messages(
"unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
)
def visit_assign(self, node: nodes.Assign) -> None:
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences as well as in case self/cls
get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (nodes.Tuple, nodes.List)):
return
targets = node.targets[0].itered()
try:
inferred = utils.safe_infer(node.value)
if inferred is not None:
self._check_unpacking(inferred, node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node: nodes.ListComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _: nodes.ListComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def leave_assign(self, node: nodes.Assign) -> None:
self._store_type_annotation_names(node)
def leave_with(self, node: nodes.With) -> None:
self._store_type_annotation_names(node)
def visit_arguments(self, node: nodes.Arguments) -> None:
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
    # Relying on other checkers' options, which might not have been initialized yet.
@astroid.decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, "analyse-fallback-blocks", default=False)
@astroid.decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, "ignored-modules", default=[])
@astroid.decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, "allow-global-unused-variables", default=True)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default_or_decorator = False
if (
isinstance(frame, nodes.FunctionDef)
and node.statement(future=True) is frame
):
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: nodes.NodeNG, frame: nodes.NodeNG
) -> bool:
"""return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
if isinstance(parent, nodes.Lambda) and child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
if isinstance(parent, nodes.Comprehension) and child is not parent.iter:
# Only iter of list/set/dict/generator comprehension should have access.
return True
if isinstance(parent, nodes.ComprehensionScope) and not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
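    # Illustrative sketch:
    #     class A:
    #         x = [1, 2]
    #         y = [x for _ in x]
    # Only the second ``x`` (the comprehension's outermost iterable) has
    # access to the class attribute; the first ``x``, in the comprehension
    # body, does not, and this method returns True for it.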
@staticmethod
def _is_variable_violation(
node: nodes.Name,
defnode,
stmt: nodes.Statement,
defstmt: nodes.Statement,
frame, # scope of statement of node
defframe,
base_scope_type,
is_recursive_klass,
) -> Tuple[bool, bool, bool]:
# pylint: disable=too-many-nested-blocks
maybe_before_assign = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybe_before_assign = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if (
node.name in defframe.scope_attrs
or astroid.builtin_lookup(node.name)[1]
):
maybe_before_assign = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = (
isinstance(frame, nodes.FunctionDef)
or isinstance(node.frame(future=True), nodes.Lambda)
) and _assigned_locally(node)
if not forbid_lookup and defframe.root().lookup(node.name)[1]:
maybe_before_assign = False
use_outer_definition = stmt == defstmt and not isinstance(
defnode, nodes.Comprehension
)
# check if we have a nonlocal
elif node.name in defframe.locals:
maybe_before_assign = not any(
isinstance(child, nodes.Nonlocal) and node.name in child.names
for child in defframe.get_children()
)
if (
base_scope_type == "lambda"
and isinstance(frame, nodes.ClassDef)
and node.name in frame.locals
):
            # This rule verifies that if the definition node of the
            # checked name is an Arguments node, and if the name
            # is used as a default value in the arguments' defaults,
            # then the actual definition of the variable
            # happens before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybe_before_assign should be False, otherwise
# it should be True.
maybe_before_assign = not (
isinstance(defnode, nodes.Arguments)
and node in defnode.defaults
and frame.locals[node.name][0].fromlineno < defstmt.fromlineno
)
elif isinstance(defframe, nodes.ClassDef) and isinstance(
frame, nodes.FunctionDef
):
# Special rule for function return annotations,
# using a name defined earlier in the class containing the function.
if node is frame.returns and defframe.parent_of(frame.returns):
annotation_return = True
if (
frame.returns.name in defframe.locals
and defframe.locals[node.name][0].lineno < frame.lineno
):
# Detect class assignments with a name defined earlier in the
# class. In this case, no warning should be raised.
maybe_before_assign = False
else:
maybe_before_assign = True
if isinstance(node.parent, nodes.Arguments):
maybe_before_assign = stmt.fromlineno <= defstmt.fromlineno
elif is_recursive_klass:
maybe_before_assign = True
else:
maybe_before_assign = (
maybe_before_assign and stmt.fromlineno <= defstmt.fromlineno
)
if maybe_before_assign and stmt.fromlineno == defstmt.fromlineno:
if (
isinstance(defframe, nodes.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt
):
# Single statement function, with the statement on the
# same line as the function definition
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Expr,
nodes.Return,
),
)
and (
isinstance(defstmt.value, nodes.IfExp)
or isinstance(defstmt.value, nodes.Lambda)
and isinstance(defstmt.value.body, nodes.IfExp)
)
and frame is defframe
and defframe.parent_of(node)
and stmt is defstmt
):
# Single statement if, with assignment expression on same
# line as assignment
# x = b if (b := True) else False
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defnode, nodes.NamedExpr
)
and frame is defframe
and defframe.parent_of(stmt)
and stmt is defstmt
and (
(
defnode.lineno == node.lineno
and defnode.col_offset < node.col_offset
)
or (defnode.lineno < node.lineno)
or (
# Issue in the `ast` module until py39
# Nodes in a multiline string have the same lineno
# Could be false-positive without check
not PY39_PLUS
and defnode.lineno == node.lineno
and isinstance(
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Return,
),
)
and isinstance(defstmt.value, nodes.JoinedStr)
)
)
):
# Expressions, with assignment expressions
# Use only after assignment
# b = (c := 2) and c
maybe_before_assign = False
# Look for type checking definitions inside a type checking guard.
if isinstance(defstmt, (nodes.Import, nodes.ImportFrom)):
defstmt_parent = defstmt.parent
if (
isinstance(defstmt_parent, nodes.If)
and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
):
# Exempt those definitions that are used inside the type checking
# guard or that are defined in both type checking guard branches.
used_in_branch = defstmt_parent.parent_of(node)
defined_in_or_else = False
for definition in defstmt_parent.orelse:
if isinstance(definition, nodes.Assign):
defined_in_or_else = any(
target.name == node.name
for target in definition.targets
if isinstance(target, nodes.AssignName)
)
if defined_in_or_else:
break
if not used_in_branch and not defined_in_or_else:
maybe_before_assign = True
return maybe_before_assign, annotation_return, use_outer_definition
@staticmethod
def _is_only_type_assignment(node: nodes.Name, defstmt: nodes.Statement) -> bool:
"""Check if variable only gets assigned a type and never a value"""
if not isinstance(defstmt, nodes.AnnAssign) or defstmt.value:
return False
defstmt_frame = defstmt.frame(future=True)
node_frame = node.frame(future=True)
parent = node
while parent is not defstmt_frame.parent:
parent_scope = parent.scope()
local_refs = parent_scope.locals.get(node.name, [])
for ref_node in local_refs:
# If local ref is in the same frame as our node, but on a later lineno
# we don't actually care about this local ref.
# Local refs are ordered, so we break.
# print(var)
# var = 1 # <- irrelevant
if defstmt_frame == node_frame and ref_node.lineno > node.lineno:
break
# If the parent of the local reference is anything but an AnnAssign
# Or if the AnnAssign adds a value the variable will now have a value
# var = 1 # OR
# var: int = 1
if (
not isinstance(ref_node.parent, nodes.AnnAssign)
or ref_node.parent.value
):
return False
parent = parent_scope.parent
return True
@staticmethod
def _is_first_level_self_reference(
node: nodes.Name, defstmt: nodes.ClassDef
) -> Literal[0, 1, 2]:
"""Check if a first level method's annotation or default values
refers to its own class.
Return values correspond to:
0 = Continue
1 = Break
2 = Break + emit message
"""
if node.frame(future=True).parent == defstmt and node.statement(
future=True
) == node.frame(future=True):
# Check if used as type annotation
# Break but don't emit message if postponed evaluation is enabled
if utils.is_node_in_type_annotation_context(node):
if not utils.is_postponed_evaluation_enabled(node):
return 2
return 1
# Check if used as default value by calling the class
if isinstance(node.parent, nodes.Call) and isinstance(
node.parent.parent, nodes.Arguments
):
return 2
return 0
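    # Illustrative sketch:
    #     class A:
    #         def method(self, arg: A) -> None: ...
    # The first-level method's annotation refers to its own class: this
    # returns 2 (break and emit used-before-assignment), or 1 (break without
    # a message) when postponed evaluation of annotations is enabled.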
@staticmethod
def _is_never_evaluated(
defnode: nodes.NamedExpr, defnode_parent: nodes.IfExp
) -> bool:
"""Check if a NamedExpr is inside a side of if ... else that never
gets evaluated
"""
inferred_test = utils.safe_infer(defnode_parent.test)
if isinstance(inferred_test, nodes.Const):
if inferred_test.value is True and defnode == defnode_parent.orelse:
return True
if inferred_test.value is False and defnode == defnode_parent.body:
return True
return False
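    # Illustrative sketch:
    #     x = (a := 1) if True else (b := 2)
    #     print(b)   # undefined-variable: the orelse branch never runs
    # Here _is_never_evaluated(<NamedExpr b>, <IfExp>) returns True.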
def _ignore_class_scope(self, node):
"""Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
# class C:
# class Tp:
# pass
# class D(Tp):
# ...
name = node.name
frame = node.statement(future=True).scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, nodes.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
def _loopvar_name(self, node: astroid.Name) -> None:
# filter variables according to node's scope
astmts = [s for s in node.lookup(node.name)[1] if hasattr(s, "assign_type")]
# If this variable usage exists inside a function definition
# that exists in the same loop,
# the usage is safe because the function will not be defined either if
# the variable is not defined.
scope = node.scope()
if isinstance(scope, nodes.FunctionDef) and any(
asmt.scope().parent_of(scope) for asmt in astmts
):
return
# Filter variables according to their respective scope. Test parent
# and statement to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
# pylint: disable-next=too-many-boolean-expressions
if (
not astmts
or (
astmts[0].parent == astmts[0].root()
and astmts[0].parent.parent_of(node)
)
or (
astmts[0].is_statement
or not isinstance(astmts[0].parent, nodes.Module)
and astmts[0].statement(future=True).parent_of(node)
)
):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if astmts[i].statement(future=True).parent_of(
stmt
) and not in_for_else_branch(astmts[i].statement(future=True), stmt):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) != 1:
return
assign = astmts[0].assign_type()
if not (
isinstance(assign, (nodes.For, nodes.Comprehension, nodes.GeneratorExp))
and assign.statement(future=True) is not node.statement(future=True)
):
return
# For functions we can do more by inferring the length of the itered object
if not isinstance(assign, nodes.For):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
try:
inferred = next(assign.iter.infer())
except astroid.InferenceError:
self.add_message("undefined-loop-variable", args=node.name, node=node)
else:
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == BUILTIN_RANGE
):
# Consider range() objects safe, even if they might not yield any results.
return
# Consider sequences.
sequences = (
nodes.List,
nodes.Tuple,
nodes.Dict,
nodes.Set,
astroid.objects.FrozenSet,
)
if not isinstance(inferred, sequences):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
elements = getattr(inferred, "elts", getattr(inferred, "items", []))
if not elements:
self.add_message("undefined-loop-variable", args=node.name, node=node)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (
isinstance(node, nodes.FunctionDef)
and name == "__class__"
and len(node.locals["__class__"]) == 1
and isinstance(node.locals["__class__"][0], nodes.ClassDef)
):
return
# Ignore names imported by the global statement.
if isinstance(stmt, (nodes.Global, nodes.Import, nodes.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(
itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
)
# Care about functions with unknown argument (builtins)
if name in argnames:
self._check_unused_arguments(name, node, stmt, argnames)
else:
if stmt.parent and isinstance(
stmt.parent, (nodes.Assign, nodes.AnnAssign, nodes.Tuple)
):
if name in nonlocal_names:
return
qname = asname = None
if isinstance(stmt, (nodes.Import, nodes.ImportFrom)):
# Need the complete name, which we don't have in .locals.
if len(stmt.names) > 1:
import_names = next(
(names for names in stmt.names if name in names), None
)
else:
import_names = stmt.names[0]
if import_names:
qname, asname = import_names
name = asname or qname
if _has_locals_call_after_node(stmt, node.scope()):
message_name = "possibly-unused-variable"
else:
if isinstance(stmt, nodes.Import):
if asname is not None:
msg = f"{qname} imported as {asname}"
else:
msg = f"import {name}"
self.add_message("unused-import", args=msg, node=stmt)
return
if isinstance(stmt, nodes.ImportFrom):
if asname is not None:
msg = f"{qname} imported from {stmt.modname} as {asname}"
else:
msg = f"{name} imported from {stmt.modname}"
self.add_message("unused-import", args=msg, node=stmt)
return
message_name = "unused-variable"
if isinstance(stmt, nodes.FunctionDef) and stmt.decorators:
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Special case for exception variable
if isinstance(stmt.parent, nodes.ExceptHandler) and any(
n.name == name for n in stmt.parent.nodes_of_class(nodes.Name)
):
return
self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.parent, nodes.Arguments)
or isinstance(stmt, nodes.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_unused_arguments(self, name, node, stmt, argnames):
is_method = node.is_method()
klass = node.parent.frame(future=True)
if is_method and isinstance(klass, nodes.ClassDef):
confidence = (
INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
)
else:
confidence = HIGH
if is_method:
            # Don't warn for the first argument of a (non-static) method
if node.type != "staticmethod" and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in (
"__init__",
"__new__",
):
return
# Don't check callback arguments
if any(
node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks
):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Don't check protocol classes
if utils.is_protocol_class(klass):
return
self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
def _check_late_binding_closure(self, node: nodes.Name) -> None:
"""Check whether node is a cell var that is assigned within a containing loop.
Special cases where we don't care about the error:
1. When the node's function is immediately called, e.g. (lambda: i)()
2. When the node's function is returned from within the loop, e.g. return lambda: i
"""
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
node_scope = node.frame(future=True)
# If node appears in a default argument expression,
# look at the next enclosing frame instead
if utils.is_default_argument(node, node_scope):
node_scope = node_scope.parent.frame(future=True)
# Check if node is a cell var
if (
not isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef))
or node.name in node_scope.locals
):
return
assign_scope, stmts = node.lookup(node.name)
if not stmts or not assign_scope.parent_of(node_scope):
return
if utils.is_comprehension(assign_scope):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
# Look for an enclosing For loop.
# Currently, we only consider the first assignment
assignment_node = stmts[0]
maybe_for = assignment_node
while maybe_for and not isinstance(maybe_for, nodes.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for
and maybe_for.parent_of(node_scope)
and not utils.is_being_called(node_scope)
and node_scope.parent
and not isinstance(node_scope.statement(future=True), nodes.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, nodes.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _allowed_redefined_builtin(self, name):
return name in self.config.allowed_redefined_builtins
def _has_homonym_in_upper_function_scope(
self, node: nodes.Name, index: int
) -> bool:
"""Return whether there is a node with the same name in the
to_consume dict of an upper scope and if that scope is a
function
:param node: node to check for
:param index: index of the current consumer inside self._to_consume
:return: True if there is a node with the same name in the
to_consume dict of an upper scope and if that scope
is a function, False otherwise
"""
return any(
_consumer.scope_type == "function" and node.name in _consumer.to_consume
for _consumer in self._to_consume[index - 1 :: -1]
)
def _store_type_annotation_node(self, type_annotation):
"""Given a type annotation, store all the name nodes it refers to"""
if isinstance(type_annotation, nodes.Name):
self._type_annotation_names.append(type_annotation.name)
return
if isinstance(type_annotation, nodes.Attribute):
self._store_type_annotation_node(type_annotation.expr)
return
if not isinstance(type_annotation, nodes.Subscript):
return
if (
isinstance(type_annotation.value, nodes.Attribute)
and isinstance(type_annotation.value.expr, nodes.Name)
and type_annotation.value.expr.name == TYPING_MODULE
):
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name for annotation in type_annotation.nodes_of_class(nodes.Name)
)
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
def _check_self_cls_assign(self, node: nodes.Assign) -> None:
"""Check that self/cls don't get assigned"""
assign_names: Set[Optional[str]] = set()
for target in node.targets:
if isinstance(target, nodes.AssignName):
assign_names.add(target.name)
elif isinstance(target, nodes.Tuple):
assign_names.update(
elt.name for elt in target.elts if isinstance(elt, nodes.AssignName)
)
scope = node.scope()
nonlocals_with_same_name = any(
child for child in scope.body if isinstance(child, nodes.Nonlocal)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
if self_cls_name in assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
def _check_unpacking(self, inferred, node, targets):
"""Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if inferred is astroid.Uninferable:
return
if (
isinstance(inferred.parent, nodes.Arguments)
and isinstance(node.value, nodes.Name)
and node.value.name == inferred.parent.vararg
):
# Variable-length argument, we can't determine the length.
return
# Attempt to check unpacking is properly balanced
values: Optional[List] = None
if isinstance(inferred, (nodes.Tuple, nodes.List)):
values = inferred.itered()
elif isinstance(inferred, astroid.Instance) and any(
ancestor.qname() == "typing.NamedTuple" for ancestor in inferred.ancestors()
):
values = [i for i in inferred.values() if isinstance(i, nodes.AssignName)]
if values:
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, nodes.Starred) for target in targets):
return
self.add_message(
"unbalanced-tuple-unpacking",
node=node,
args=(
_get_unpacking_extra_info(node, inferred),
len(targets),
len(values),
),
)
# attempt to check unpacking may be possible (ie RHS is iterable)
elif not utils.is_iterable(inferred):
self.add_message(
"unpacking-non-sequence",
node=node,
args=(_get_unpacking_extra_info(node, inferred),),
)
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
while module_names:
name = module_names.pop(0)
if name == "__dict__":
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message(
"no-name-in-module", args=(name, module.name), node=node
)
return None
except astroid.InferenceError:
return None
if module_names:
modname = module.name if module else "__dict__"
self.add_message(
"no-name-in-module", node=node, args=(".".join(module_names), modname)
)
return None
if isinstance(module, nodes.Module):
return module
return None
def _check_all(self, node: nodes.Module, not_consumed):
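        # e.g. (hypothetical module) `__all__ = ("foo", 42)` emits
        # invalid-all-object for the 42, and undefined-all-variable for "foo"
        # if no such name is defined in the module.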
assigned = next(node.igetattr("__all__"))
if assigned is astroid.Uninferable:
return
        if assigned.pytype() not in {"builtins.list", "builtins.tuple"}:
line, col = assigned.tolineno, assigned.col_offset
self.add_message("invalid-all-format", line=line, col_offset=col, node=node)
return
for elt in getattr(assigned, "elts", ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if not isinstance(elt_name, nodes.Const) or not isinstance(
elt_name.value, str
):
self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == "__init__":
name = node.name + "." + elt_name
try:
astroid.modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
except SyntaxError:
# don't yield a syntax-error warning,
# because it will be later yielded
# when the file will be checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, node_lst in not_consumed.items():
for node in node_lst:
self.add_message("unused-variable", args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
unused_wildcard_imports: DefaultDict[
Tuple[str, nodes.ImportFrom], List[str]
] = collections.defaultdict(list)
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
is_type_annotation_import = (
imported_name in self._type_annotation_names
or as_name in self._type_annotation_names
)
if isinstance(stmt, nodes.Import) or (
isinstance(stmt, nodes.ImportFrom) and not stmt.modname
):
if isinstance(stmt, nodes.ImportFrom) and SPECIAL_OBJ.search(
imported_name
):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if as_name == "_":
continue
if as_name is None:
msg = f"import {imported_name}"
else:
msg = f"{imported_name} imported as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
elif isinstance(stmt, nodes.ImportFrom) and stmt.modname != FUTURE:
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if imported_name == "*":
unused_wildcard_imports[(stmt.modname, stmt)].append(name)
else:
if as_name is None:
msg = f"{imported_name} imported from {stmt.modname}"
else:
msg = f"{imported_name} imported from {stmt.modname} as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
# Construct string for unused-wildcard-import message
for module, unused_list in unused_wildcard_imports.items():
if len(unused_list) == 1:
arg_string = unused_list[0]
else:
arg_string = (
f"{', '.join(i for i in unused_list[:-1])} and {unused_list[-1]}"
)
self.add_message(
"unused-wildcard-import", args=(arg_string, module[0]), node=module[1]
)
del self._to_consume
def _check_metaclasses(self, node):
"""Update consumption analysis for metaclasses."""
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, nodes.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
            # Skip if this class doesn't explicitly use a metaclass, but inherits it from ancestors
return []
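        # Illustrative (hypothetical): for `class Foo(metaclass=Meta): ...`,
        # the name `Meta` is marked as consumed in the nearest enclosing scope
        # that defines it, so it is not later reported as unused.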
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, nodes.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, nodes.Attribute) and klass._metaclass.expr:
attr = klass._metaclass.expr
while not isinstance(attr, nodes.Name):
attr = attr.expr
name = attr.name
elif metaclass:
name = metaclass.root().name
found = False
name = METACLASS_NAME_TRANSFORMS.get(name, name)
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _, _ in self._to_consume[::-1]:
found_nodes = scope_locals.get(name, [])
for found_node in found_nodes:
if found_node.lineno <= klass.lineno:
consumed.append((scope_locals, name))
found = True
break
# Check parent scope
nodes_in_parent_scope = parent_node.locals.get(name, [])
for found_node_parent in nodes_in_parent_scope:
if found_node_parent.lineno <= klass.lineno:
found = True
break
if (
not found
and not metaclass
and not (
name in nodes.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
)
):
self.add_message("undefined-variable", node=klass, args=(name,))
return consumed
def register(linter: "PyLinter") -> None:
linter.register_checker(VariablesChecker(linter))
| 1 | 20,475 | Can't we return an empty list and then just compare the lengths? Thereby removing this `if`. | PyCQA-pylint | py |
@@ -151,6 +151,7 @@ class Uppy {
this.addFile = this.addFile.bind(this)
this.removeFile = this.removeFile.bind(this)
this.pauseResume = this.pauseResume.bind(this)
+ this.passesRestrictions = this.passesRestrictions.bind(this)
// ___Why throttle at 500ms?
  // - We must throttle at >250ms for superfocus in Dashboard to work well (because animation takes 0.25s, and we want to wait for all animations to be over before refocusing).
 | 1 |
const Translator = require('@uppy/utils/lib/Translator')
const ee = require('namespace-emitter')
const cuid = require('cuid')
const throttle = require('lodash.throttle')
const prettierBytes = require('@transloadit/prettier-bytes')
const match = require('mime-match')
const DefaultStore = require('@uppy/store-default')
const getFileType = require('@uppy/utils/lib/getFileType')
const getFileNameAndExtension = require('@uppy/utils/lib/getFileNameAndExtension')
const generateFileID = require('@uppy/utils/lib/generateFileID')
const supportsUploadProgress = require('./supportsUploadProgress')
const { justErrorsLogger, debugLogger } = require('./loggers')
const Plugin = require('./Plugin') // Exported from here.
class RestrictionError extends Error {
constructor (...args) {
super(...args)
this.isRestriction = true
}
}
/**
* Uppy Core module.
* Manages plugins, state updates, acts as an event bus,
* adds/removes files and metadata.
*/
class Uppy {
static VERSION = require('../package.json').version
/**
* Instantiate Uppy
*
* @param {object} opts — Uppy options
*/
constructor (opts) {
this.defaultLocale = {
strings: {
addBulkFilesFailed: {
0: 'Failed to add %{smart_count} file due to an internal error',
1: 'Failed to add %{smart_count} files due to internal errors'
},
youCanOnlyUploadX: {
0: 'You can only upload %{smart_count} file',
1: 'You can only upload %{smart_count} files'
},
youHaveToAtLeastSelectX: {
0: 'You have to select at least %{smart_count} file',
1: 'You have to select at least %{smart_count} files'
},
// The default `exceedsSize2` string only combines the `exceedsSize` string (%{backwardsCompat}) with the size.
// Locales can override `exceedsSize2` to specify a different word order. This is for backwards compat with
        // Uppy 1.9.x and below, which did a naive concatenation of `exceedsSize + size` instead of using a locale-specific
        // substitution.
        // TODO: In 2.0 `exceedsSize2` should be removed and `exceedsSize` updated to use substitution.
exceedsSize2: '%{backwardsCompat} %{size}',
exceedsSize: 'This file exceeds maximum allowed size of',
inferiorSize: 'This file is smaller than the allowed size of %{size}',
youCanOnlyUploadFileTypes: 'You can only upload: %{types}',
noNewAlreadyUploading: 'Cannot add new files: already uploading',
noDuplicates: 'Cannot add the duplicate file \'%{fileName}\', it already exists',
companionError: 'Connection with Companion failed',
companionUnauthorizeHint: 'To unauthorize to your %{provider} account, please go to %{url}',
failedToUpload: 'Failed to upload %{file}',
noInternetConnection: 'No Internet connection',
connectedToInternet: 'Connected to the Internet',
// Strings for remote providers
noFilesFound: 'You have no files or folders here',
selectX: {
0: 'Select %{smart_count}',
1: 'Select %{smart_count}'
},
selectAllFilesFromFolderNamed: 'Select all files from folder %{name}',
unselectAllFilesFromFolderNamed: 'Unselect all files from folder %{name}',
selectFileNamed: 'Select file %{name}',
unselectFileNamed: 'Unselect file %{name}',
openFolderNamed: 'Open folder %{name}',
cancel: 'Cancel',
logOut: 'Log out',
filter: 'Filter',
resetFilter: 'Reset filter',
loading: 'Loading...',
authenticateWithTitle: 'Please authenticate with %{pluginName} to select files',
authenticateWith: 'Connect to %{pluginName}',
emptyFolderAdded: 'No files were added from empty folder',
folderAdded: {
0: 'Added %{smart_count} file from %{folder}',
1: 'Added %{smart_count} files from %{folder}'
}
}
}
const defaultOptions = {
id: 'uppy',
autoProceed: false,
allowMultipleUploads: true,
debug: false,
restrictions: {
maxFileSize: null,
minFileSize: null,
maxTotalFileSize: null,
maxNumberOfFiles: null,
minNumberOfFiles: null,
allowedFileTypes: null
},
meta: {},
onBeforeFileAdded: (currentFile, files) => currentFile,
onBeforeUpload: (files) => files,
store: DefaultStore(),
logger: justErrorsLogger
}
// Merge default options with the ones set by user,
// making sure to merge restrictions too
this.opts = {
...defaultOptions,
...opts,
restrictions: {
...defaultOptions.restrictions,
...(opts && opts.restrictions)
}
}
    // Support debug: true for backwards-compatibility, unless logger is set in opts
// opts instead of this.opts to avoid comparing objects — we set logger: justErrorsLogger in defaultOptions
if (opts && opts.logger && opts.debug) {
this.log('You are using a custom `logger`, but also set `debug: true`, which uses built-in logger to output logs to console. Ignoring `debug: true` and using your custom `logger`.', 'warning')
} else if (opts && opts.debug) {
this.opts.logger = debugLogger
}
this.log(`Using Core v${this.constructor.VERSION}`)
if (this.opts.restrictions.allowedFileTypes &&
this.opts.restrictions.allowedFileTypes !== null &&
!Array.isArray(this.opts.restrictions.allowedFileTypes)) {
throw new TypeError('`restrictions.allowedFileTypes` must be an array')
}
this.i18nInit()
// Container for different types of plugins
this.plugins = {}
this.getState = this.getState.bind(this)
this.getPlugin = this.getPlugin.bind(this)
this.setFileMeta = this.setFileMeta.bind(this)
this.setFileState = this.setFileState.bind(this)
this.log = this.log.bind(this)
this.info = this.info.bind(this)
this.hideInfo = this.hideInfo.bind(this)
this.addFile = this.addFile.bind(this)
this.removeFile = this.removeFile.bind(this)
this.pauseResume = this.pauseResume.bind(this)
// ___Why throttle at 500ms?
// - We must throttle at >250ms for superfocus in Dashboard to work well (because animation takes 0.25s, and we want to wait for all animations to be over before refocusing).
// [Practical Check]: if thottle is at 100ms, then if you are uploading a file, and click 'ADD MORE FILES', - focus won't activate in Firefox.
// - We must throttle at around >500ms to avoid performance lags.
// [Practical Check] Firefox, try to upload a big file for a prolonged period of time. Laptop will start to heat up.
this._calculateProgress = throttle(this._calculateProgress.bind(this), 500, { leading: true, trailing: true })
this.updateOnlineStatus = this.updateOnlineStatus.bind(this)
this.resetProgress = this.resetProgress.bind(this)
this.pauseAll = this.pauseAll.bind(this)
this.resumeAll = this.resumeAll.bind(this)
this.retryAll = this.retryAll.bind(this)
this.cancelAll = this.cancelAll.bind(this)
this.retryUpload = this.retryUpload.bind(this)
this.upload = this.upload.bind(this)
this.emitter = ee()
this.on = this.on.bind(this)
this.off = this.off.bind(this)
this.once = this.emitter.once.bind(this.emitter)
this.emit = this.emitter.emit.bind(this.emitter)
this.preProcessors = []
this.uploaders = []
this.postProcessors = []
this.store = this.opts.store
this.setState({
plugins: {},
files: {},
currentUploads: {},
allowNewUpload: true,
capabilities: {
uploadProgress: supportsUploadProgress(),
individualCancellation: true,
resumableUploads: false
},
totalProgress: 0,
meta: { ...this.opts.meta },
info: {
isHidden: true,
type: 'info',
message: ''
}
})
this._storeUnsubscribe = this.store.subscribe((prevState, nextState, patch) => {
this.emit('state-update', prevState, nextState, patch)
this.updateAll(nextState)
})
// Exposing uppy object on window for debugging and testing
if (this.opts.debug && typeof window !== 'undefined') {
window[this.opts.id] = this
}
this._addListeners()
// Re-enable if we’ll need some capabilities on boot, like isMobileDevice
// this._setCapabilities()
}
// _setCapabilities = () => {
// const capabilities = {
// isMobileDevice: isMobileDevice()
// }
// this.setState({
// ...this.getState().capabilities,
// capabilities
// })
// }
on (event, callback) {
this.emitter.on(event, callback)
return this
}
off (event, callback) {
this.emitter.off(event, callback)
return this
}
/**
* Iterate on all plugins and run `update` on them.
* Called each time state changes.
*
*/
updateAll (state) {
this.iteratePlugins(plugin => {
plugin.update(state)
})
}
/**
* Updates state with a patch
*
* @param {object} patch {foo: 'bar'}
*/
setState (patch) {
this.store.setState(patch)
}
/**
* Returns current state.
*
* @returns {object}
*/
getState () {
return this.store.getState()
}
/**
* Back compat for when uppy.state is used instead of uppy.getState().
*/
get state () {
return this.getState()
}
/**
* Shorthand to set state for a specific file.
*/
setFileState (fileID, state) {
if (!this.getState().files[fileID]) {
throw new Error(`Can’t set state for ${fileID} (the file could have been removed)`)
}
this.setState({
files: Object.assign({}, this.getState().files, {
[fileID]: Object.assign({}, this.getState().files[fileID], state)
})
})
}
i18nInit () {
this.translator = new Translator([this.defaultLocale, this.opts.locale])
this.locale = this.translator.locale
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
}
setOptions (newOpts) {
this.opts = {
...this.opts,
...newOpts,
restrictions: {
...this.opts.restrictions,
...(newOpts && newOpts.restrictions)
}
}
if (newOpts.meta) {
this.setMeta(newOpts.meta)
}
this.i18nInit()
if (newOpts.locale) {
this.iteratePlugins((plugin) => {
plugin.setOptions()
})
}
this.setState() // so that UI re-renders with new options
}
resetProgress () {
const defaultProgress = {
percentage: 0,
bytesUploaded: 0,
uploadComplete: false,
uploadStarted: null
}
const files = Object.assign({}, this.getState().files)
const updatedFiles = {}
Object.keys(files).forEach(fileID => {
const updatedFile = Object.assign({}, files[fileID])
updatedFile.progress = Object.assign({}, updatedFile.progress, defaultProgress)
updatedFiles[fileID] = updatedFile
})
this.setState({
files: updatedFiles,
totalProgress: 0
})
this.emit('reset-progress')
}
addPreProcessor (fn) {
this.preProcessors.push(fn)
}
removePreProcessor (fn) {
const i = this.preProcessors.indexOf(fn)
if (i !== -1) {
this.preProcessors.splice(i, 1)
}
}
addPostProcessor (fn) {
this.postProcessors.push(fn)
}
removePostProcessor (fn) {
const i = this.postProcessors.indexOf(fn)
if (i !== -1) {
this.postProcessors.splice(i, 1)
}
}
addUploader (fn) {
this.uploaders.push(fn)
}
removeUploader (fn) {
const i = this.uploaders.indexOf(fn)
if (i !== -1) {
this.uploaders.splice(i, 1)
}
}
setMeta (data) {
const updatedMeta = Object.assign({}, this.getState().meta, data)
const updatedFiles = Object.assign({}, this.getState().files)
Object.keys(updatedFiles).forEach((fileID) => {
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: Object.assign({}, updatedFiles[fileID].meta, data)
})
})
this.log('Adding metadata:')
this.log(data)
this.setState({
meta: updatedMeta,
files: updatedFiles
})
}
setFileMeta (fileID, data) {
const updatedFiles = Object.assign({}, this.getState().files)
if (!updatedFiles[fileID]) {
this.log('Was trying to set metadata for a file that has been removed: ', fileID)
return
}
const newMeta = Object.assign({}, updatedFiles[fileID].meta, data)
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: newMeta
})
this.setState({ files: updatedFiles })
}
/**
* Get a file object.
*
* @param {string} fileID The ID of the file object to return.
*/
getFile (fileID) {
return this.getState().files[fileID]
}
/**
* Get all files in an array.
*/
getFiles () {
const { files } = this.getState()
return Object.keys(files).map((fileID) => files[fileID])
}
/**
* Check if minNumberOfFiles restriction is reached before uploading.
*
* @private
*/
_checkMinNumberOfFiles (files) {
const { minNumberOfFiles } = this.opts.restrictions
if (Object.keys(files).length < minNumberOfFiles) {
throw new RestrictionError(`${this.i18n('youHaveToAtLeastSelectX', { smart_count: minNumberOfFiles })}`)
}
}
/**
* Check if file passes a set of restrictions set in options: maxFileSize, minFileSize,
* maxNumberOfFiles and allowedFileTypes.
*
* @param {object} files Object of IDs → files already added
* @param {object} file object to check
* @private
*/
_checkRestrictions (files, file) {
const { maxFileSize, minFileSize, maxTotalFileSize, maxNumberOfFiles, allowedFileTypes } = this.opts.restrictions
if (maxNumberOfFiles) {
if (Object.keys(files).length + 1 > maxNumberOfFiles) {
throw new RestrictionError(`${this.i18n('youCanOnlyUploadX', { smart_count: maxNumberOfFiles })}`)
}
}
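    // Illustrative config (not from this file): entries containing a slash are
    // matched as mime wildcards, dot-prefixed entries as extensions, e.g.
    //   restrictions: { allowedFileTypes: ['image/*', '.pdf'] }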
if (allowedFileTypes) {
const isCorrectFileType = allowedFileTypes.some((type) => {
        // if this is a mime-type
if (type.indexOf('/') > -1) {
if (!file.type) return false
return match(file.type.replace(/;.*?$/, ''), type)
}
// otherwise this is likely an extension
if (type[0] === '.') {
return file.extension.toLowerCase() === type.substr(1).toLowerCase()
}
return false
})
if (!isCorrectFileType) {
const allowedFileTypesString = allowedFileTypes.join(', ')
throw new RestrictionError(this.i18n('youCanOnlyUploadFileTypes', { types: allowedFileTypesString }))
}
}
// We can't check maxTotalFileSize if the size is unknown.
if (maxTotalFileSize && file.data.size != null) {
let totalFilesSize = 0
totalFilesSize += file.data.size
Object.keys(files).forEach((file) => {
totalFilesSize += files[file].data.size
})
if (totalFilesSize > maxTotalFileSize) {
throw new RestrictionError(this.i18n('exceedsSize2', {
backwardsCompat: this.i18n('exceedsSize'),
size: prettierBytes(maxTotalFileSize)
}))
}
}
// We can't check maxFileSize if the size is unknown.
if (maxFileSize && file.data.size != null) {
if (file.data.size > maxFileSize) {
throw new RestrictionError(this.i18n('exceedsSize2', {
backwardsCompat: this.i18n('exceedsSize'),
size: prettierBytes(maxFileSize)
}))
}
}
// We can't check minFileSize if the size is unknown.
if (minFileSize && file.data.size != null) {
if (file.data.size < minFileSize) {
throw new RestrictionError(this.i18n('inferiorSize', {
size: prettierBytes(minFileSize)
}))
}
}
}
/**
* Logs an error, sets Informer message, then throws the error.
* Emits a 'restriction-failed' event if it’s a restriction error
*
* @param {object | string} err — Error object or plain string message
* @param {object} [options]
* @param {boolean} [options.showInformer=true] — Sometimes developer might want to show Informer manually
* @param {object} [options.file=null] — File object used to emit the restriction error
* @param {boolean} [options.throwErr=true] — Errors shouldn’t be thrown, for example, in `upload-error` event
* @private
*/
_showOrLogErrorAndThrow (err, { showInformer = true, file = null, throwErr = true } = {}) {
const message = typeof err === 'object' ? err.message : err
const details = (typeof err === 'object' && err.details) ? err.details : ''
// Restriction errors should be logged, but not as errors,
// as they are expected and shown in the UI.
let logMessageWithDetails = message
if (details) {
logMessageWithDetails += ' ' + details
}
if (err.isRestriction) {
this.log(logMessageWithDetails)
this.emit('restriction-failed', file, err)
} else {
this.log(logMessageWithDetails, 'error')
}
// Sometimes informer has to be shown manually by the developer,
// for example, in `onBeforeFileAdded`.
if (showInformer) {
this.info({ message: message, details: details }, 'error', 5000)
}
if (throwErr) {
throw (typeof err === 'object' ? err : new Error(err))
}
}
_assertNewUploadAllowed (file) {
const { allowNewUpload } = this.getState()
if (allowNewUpload === false) {
this._showOrLogErrorAndThrow(new RestrictionError(this.i18n('noNewAlreadyUploading')), { file })
}
}
/**
* Create a file state object based on user-provided `addFile()` options.
*
* Note this is extremely side-effectful and should only be done when a file state object will be added to state immediately afterward!
*
* The `files` value is passed in because it may be updated by the caller without updating the store.
*/
_checkAndCreateFileStateObject (files, file) {
const fileType = getFileType(file)
file.type = fileType
const onBeforeFileAddedResult = this.opts.onBeforeFileAdded(file, files)
if (onBeforeFileAddedResult === false) {
// Don’t show UI info for this error, as it should be done by the developer
this._showOrLogErrorAndThrow(new RestrictionError('Cannot add the file because onBeforeFileAdded returned false.'), { showInformer: false, file })
}
if (typeof onBeforeFileAddedResult === 'object' && onBeforeFileAddedResult) {
file = onBeforeFileAddedResult
}
let fileName
if (file.name) {
fileName = file.name
} else if (fileType.split('/')[0] === 'image') {
fileName = fileType.split('/')[0] + '.' + fileType.split('/')[1]
} else {
fileName = 'noname'
}
const fileExtension = getFileNameAndExtension(fileName).extension
const isRemote = file.isRemote || false
const fileID = generateFileID(file)
if (files[fileID]) {
this._showOrLogErrorAndThrow(new RestrictionError(this.i18n('noDuplicates', { fileName })), { file })
}
const meta = file.meta || {}
meta.name = fileName
meta.type = fileType
// `null` means the size is unknown.
const size = isFinite(file.data.size) ? file.data.size : null
const newFile = {
source: file.source || '',
id: fileID,
name: fileName,
extension: fileExtension || '',
meta: {
...this.getState().meta,
...meta
},
type: fileType,
data: file.data,
progress: {
percentage: 0,
bytesUploaded: 0,
bytesTotal: size,
uploadComplete: false,
uploadStarted: null
},
size: size,
isRemote: isRemote,
remote: file.remote || '',
preview: file.preview
}
try {
this._checkRestrictions(files, newFile)
} catch (err) {
this._showOrLogErrorAndThrow(err, { file: newFile })
}
return newFile
}
// Schedule an upload if `autoProceed` is enabled.
_startIfAutoProceed () {
if (this.opts.autoProceed && !this.scheduledAutoProceed) {
this.scheduledAutoProceed = setTimeout(() => {
this.scheduledAutoProceed = null
this.upload().catch((err) => {
if (!err.isRestriction) {
this.log(err.stack || err.message || err)
}
})
}, 4)
}
}
/**
* Add a new file to `state.files`. This will run `onBeforeFileAdded`,
* try to guess file type in a clever way, check file against restrictions,
* and start an upload if `autoProceed === true`.
*
* @param {object} file object to add
* @returns {string} id for the added file
*/
addFile (file) {
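    // Usage sketch (names assumed): uppy.addFile({ source: 'file input',
    // name: 'photo.jpg', type: 'image/jpeg', data: blob })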
this._assertNewUploadAllowed(file)
const { files } = this.getState()
const newFile = this._checkAndCreateFileStateObject(files, file)
this.setState({
files: {
...files,
[newFile.id]: newFile
}
})
this.emit('file-added', newFile)
this.log(`Added file: ${newFile.name}, ${newFile.id}, mime type: ${newFile.type}`)
this._startIfAutoProceed()
return newFile.id
}
/**
* Add multiple files to `state.files`. See the `addFile()` documentation.
*
* This cuts some corners for performance, so should typically only be used in cases where there may be a lot of files.
*
* If an error occurs while adding a file, it is logged and the user is notified. This is good for UI plugins, but not for programmatic use. Programmatic users should usually still use `addFile()` on individual files.
*/
addFiles (fileDescriptors) {
this._assertNewUploadAllowed()
// create a copy of the files object only once
const files = { ...this.getState().files }
const newFiles = []
const errors = []
for (let i = 0; i < fileDescriptors.length; i++) {
try {
const newFile = this._checkAndCreateFileStateObject(files, fileDescriptors[i])
newFiles.push(newFile)
files[newFile.id] = newFile
} catch (err) {
if (!err.isRestriction) {
errors.push(err)
}
}
}
this.setState({ files })
newFiles.forEach((newFile) => {
this.emit('file-added', newFile)
})
if (newFiles.length > 5) {
this.log(`Added batch of ${newFiles.length} files`)
} else {
Object.keys(newFiles).forEach(fileID => {
this.log(`Added file: ${newFiles[fileID].name}\n id: ${newFiles[fileID].id}\n type: ${newFiles[fileID].type}`)
})
}
if (newFiles.length > 0) {
this._startIfAutoProceed()
}
if (errors.length > 0) {
let message = 'Multiple errors occurred while adding files:\n'
errors.forEach((subError) => {
message += `\n * ${subError.message}`
})
this.info({
message: this.i18n('addBulkFilesFailed', { smart_count: errors.length }),
details: message
}, 'error', 5000)
const err = new Error(message)
err.errors = errors
throw err
}
}
removeFiles (fileIDs, reason) {
const { files, currentUploads } = this.getState()
const updatedFiles = { ...files }
const updatedUploads = { ...currentUploads }
const removedFiles = Object.create(null)
fileIDs.forEach((fileID) => {
if (files[fileID]) {
removedFiles[fileID] = files[fileID]
delete updatedFiles[fileID]
}
})
// Remove files from the `fileIDs` list in each upload.
function fileIsNotRemoved (uploadFileID) {
return removedFiles[uploadFileID] === undefined
}
const uploadsToRemove = []
Object.keys(updatedUploads).forEach((uploadID) => {
const newFileIDs = currentUploads[uploadID].fileIDs.filter(fileIsNotRemoved)
// Remove the upload if no files are associated with it anymore.
if (newFileIDs.length === 0) {
uploadsToRemove.push(uploadID)
return
}
updatedUploads[uploadID] = {
...currentUploads[uploadID],
fileIDs: newFileIDs
}
})
uploadsToRemove.forEach((uploadID) => {
delete updatedUploads[uploadID]
})
const stateUpdate = {
currentUploads: updatedUploads,
files: updatedFiles
}
// If all files were removed - allow new uploads!
if (Object.keys(updatedFiles).length === 0) {
stateUpdate.allowNewUpload = true
stateUpdate.error = null
}
this.setState(stateUpdate)
this._calculateTotalProgress()
const removedFileIDs = Object.keys(removedFiles)
removedFileIDs.forEach((fileID) => {
this.emit('file-removed', removedFiles[fileID], reason)
})
if (removedFileIDs.length > 5) {
this.log(`Removed ${removedFileIDs.length} files`)
} else {
this.log(`Removed files: ${removedFileIDs.join(', ')}`)
}
}
removeFile (fileID, reason = null) {
this.removeFiles([fileID], reason)
}
pauseResume (fileID) {
if (!this.getState().capabilities.resumableUploads ||
this.getFile(fileID).uploadComplete) {
return
}
const wasPaused = this.getFile(fileID).isPaused || false
const isPaused = !wasPaused
this.setFileState(fileID, {
isPaused: isPaused
})
this.emit('upload-pause', fileID, isPaused)
return isPaused
}
pauseAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: true
})
updatedFiles[file] = updatedFile
})
this.setState({ files: updatedFiles })
this.emit('pause-all')
}
resumeAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({ files: updatedFiles })
this.emit('resume-all')
}
retryAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const filesToRetry = Object.keys(updatedFiles).filter(file => {
return updatedFiles[file].error
})
filesToRetry.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({
files: updatedFiles,
error: null
})
this.emit('retry-all', filesToRetry)
if (filesToRetry.length === 0) {
return Promise.resolve({
successful: [],
failed: []
})
}
const uploadID = this._createUpload(filesToRetry, {
forceAllowNewUpload: true // create new upload even if allowNewUpload: false
})
return this._runUpload(uploadID)
}
cancelAll () {
this.emit('cancel-all')
const { files } = this.getState()
const fileIDs = Object.keys(files)
if (fileIDs.length) {
this.removeFiles(fileIDs, 'cancel-all')
}
this.setState({
totalProgress: 0,
error: null
})
}
retryUpload (fileID) {
this.setFileState(fileID, {
error: null,
isPaused: false
})
this.emit('upload-retry', fileID)
const uploadID = this._createUpload([fileID], {
forceAllowNewUpload: true // create new upload even if allowNewUpload: false
})
return this._runUpload(uploadID)
}
reset () {
this.cancelAll()
}
_calculateProgress (file, data) {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
// bytesTotal may be null or zero; in that case we can't divide by it
const canHavePercentage = isFinite(data.bytesTotal) && data.bytesTotal > 0
this.setFileState(file.id, {
progress: {
...this.getFile(file.id).progress,
bytesUploaded: data.bytesUploaded,
bytesTotal: data.bytesTotal,
percentage: canHavePercentage
// TODO(goto-bus-stop) flooring this should probably be the choice of the UI?
// we get more accurate calculations if we don't round this at all.
? Math.round(data.bytesUploaded / data.bytesTotal * 100)
: 0
}
})
this._calculateTotalProgress()
}
_calculateTotalProgress () {
    // Calculate total progress, using the number of files currently uploading,
    // multiplied by 100, and the sum of the individual progress of each file.
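    // Worked example (hypothetical numbers): two sized files of 60 and 40 bytes
    // with 30 and 40 bytes uploaded give (30 + 40) / (60 + 40) = 70% total.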
const files = this.getFiles()
const inProgress = files.filter((file) => {
return file.progress.uploadStarted ||
file.progress.preprocess ||
file.progress.postprocess
})
if (inProgress.length === 0) {
this.emit('progress', 0)
this.setState({ totalProgress: 0 })
return
}
const sizedFiles = inProgress.filter((file) => file.progress.bytesTotal != null)
const unsizedFiles = inProgress.filter((file) => file.progress.bytesTotal == null)
if (sizedFiles.length === 0) {
const progressMax = inProgress.length * 100
const currentProgress = unsizedFiles.reduce((acc, file) => {
return acc + file.progress.percentage
}, 0)
const totalProgress = Math.round(currentProgress / progressMax * 100)
this.setState({ totalProgress })
return
}
let totalSize = sizedFiles.reduce((acc, file) => {
return acc + file.progress.bytesTotal
}, 0)
const averageSize = totalSize / sizedFiles.length
totalSize += averageSize * unsizedFiles.length
let uploadedSize = 0
sizedFiles.forEach((file) => {
uploadedSize += file.progress.bytesUploaded
})
unsizedFiles.forEach((file) => {
uploadedSize += averageSize * (file.progress.percentage || 0) / 100
})
let totalProgress = totalSize === 0
? 0
: Math.round(uploadedSize / totalSize * 100)
// hot fix, because:
// uploadedSize ended up larger than totalSize, resulting in 1325% total
if (totalProgress > 100) {
totalProgress = 100
}
this.setState({ totalProgress })
this.emit('progress', totalProgress)
}
/**
* Registers listeners for all global actions, like:
* `error`, `file-removed`, `upload-progress`
*/
_addListeners () {
this.on('error', (error) => {
let errorMsg = 'Unknown error'
if (error.message) {
errorMsg = error.message
}
if (error.details) {
errorMsg += ' ' + error.details
}
this.setState({ error: errorMsg })
})
this.on('upload-error', (file, error, response) => {
let errorMsg = 'Unknown error'
if (error.message) {
errorMsg = error.message
}
if (error.details) {
errorMsg += ' ' + error.details
}
this.setFileState(file.id, {
error: errorMsg,
response
})
this.setState({ error: error.message })
if (typeof error === 'object' && error.message) {
const newError = new Error(error.message)
newError.details = error.message
if (error.details) {
newError.details += ' ' + error.details
}
newError.message = this.i18n('failedToUpload', { file: file.name })
this._showOrLogErrorAndThrow(newError, {
throwErr: false
})
} else {
this._showOrLogErrorAndThrow(error, {
throwErr: false
})
}
})
this.on('upload', () => {
this.setState({ error: null })
})
this.on('upload-started', (file, upload) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: {
uploadStarted: Date.now(),
uploadComplete: false,
percentage: 0,
bytesUploaded: 0,
bytesTotal: file.size
}
})
})
this.on('upload-progress', this._calculateProgress)
this.on('upload-success', (file, uploadResp) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
const currentProgress = this.getFile(file.id).progress
this.setFileState(file.id, {
progress: Object.assign({}, currentProgress, {
postprocess: this.postProcessors.length > 0 ? {
mode: 'indeterminate'
} : null,
uploadComplete: true,
percentage: 100,
bytesUploaded: currentProgress.bytesTotal
}),
response: uploadResp,
uploadURL: uploadResp.uploadURL,
isPaused: false
})
this._calculateTotalProgress()
})
this.on('preprocess-progress', (file, progress) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: Object.assign({}, this.getFile(file.id).progress, {
preprocess: progress
})
})
})
this.on('preprocess-complete', (file) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
const files = Object.assign({}, this.getState().files)
files[file.id] = Object.assign({}, files[file.id], {
progress: Object.assign({}, files[file.id].progress)
})
delete files[file.id].progress.preprocess
this.setState({ files: files })
})
this.on('postprocess-progress', (file, progress) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: Object.assign({}, this.getState().files[file.id].progress, {
postprocess: progress
})
})
})
this.on('postprocess-complete', (file) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
const files = Object.assign({}, this.getState().files)
files[file.id] = Object.assign({}, files[file.id], {
progress: Object.assign({}, files[file.id].progress)
})
delete files[file.id].progress.postprocess
// TODO should we set some kind of `fullyComplete` property on the file object
// so it's easier to see that the file is upload…fully complete…rather than
// what we have to do now (`uploadComplete && !postprocess`)
this.setState({ files: files })
})
this.on('restored', () => {
// Files may have changed--ensure progress is still accurate.
this._calculateTotalProgress()
})
// show informer if offline
if (typeof window !== 'undefined' && window.addEventListener) {
window.addEventListener('online', () => this.updateOnlineStatus())
window.addEventListener('offline', () => this.updateOnlineStatus())
setTimeout(() => this.updateOnlineStatus(), 3000)
}
}
updateOnlineStatus () {
const online =
typeof window.navigator.onLine !== 'undefined'
? window.navigator.onLine
: true
if (!online) {
this.emit('is-offline')
this.info(this.i18n('noInternetConnection'), 'error', 0)
this.wasOffline = true
} else {
this.emit('is-online')
if (this.wasOffline) {
this.emit('back-online')
this.info(this.i18n('connectedToInternet'), 'success', 3000)
this.wasOffline = false
}
}
}
getID () {
return this.opts.id
}
/**
* Registers a plugin with Core.
*
* @param {object} Plugin object
* @param {object} [opts] object with options to be passed to Plugin
* @returns {object} self for chaining
*/
use (Plugin, opts) {
if (typeof Plugin !== 'function') {
const msg = `Expected a plugin class, but got ${Plugin === null ? 'null' : typeof Plugin}.` +
' Please verify that the plugin was imported and spelled correctly.'
throw new TypeError(msg)
}
// Instantiate
const plugin = new Plugin(this, opts)
const pluginId = plugin.id
this.plugins[plugin.type] = this.plugins[plugin.type] || []
if (!pluginId) {
throw new Error('Your plugin must have an id')
}
if (!plugin.type) {
throw new Error('Your plugin must have a type')
}
const existsPluginAlready = this.getPlugin(pluginId)
if (existsPluginAlready) {
const msg = `Already found a plugin named '${existsPluginAlready.id}'. ` +
`Tried to use: '${pluginId}'.\n` +
'Uppy plugins must have unique `id` options. See https://uppy.io/docs/plugins/#id.'
throw new Error(msg)
}
if (Plugin.VERSION) {
this.log(`Using ${pluginId} v${Plugin.VERSION}`)
}
this.plugins[plugin.type].push(plugin)
plugin.install()
return this
}
/**
* Find one Plugin by name.
*
* @param {string} id plugin id
* @returns {object|boolean}
*/
getPlugin (id) {
let foundPlugin = null
this.iteratePlugins((plugin) => {
if (plugin.id === id) {
foundPlugin = plugin
return false
}
})
return foundPlugin
}
/**
* Iterate through all `use`d plugins.
*
* @param {Function} method that will be run on each plugin
*/
iteratePlugins (method) {
Object.keys(this.plugins).forEach(pluginType => {
this.plugins[pluginType].forEach(method)
})
}
/**
* Uninstall and remove a plugin.
*
* @param {object} instance The plugin instance to remove.
*/
removePlugin (instance) {
this.log(`Removing plugin ${instance.id}`)
this.emit('plugin-remove', instance)
if (instance.uninstall) {
instance.uninstall()
}
const list = this.plugins[instance.type].slice()
const index = list.indexOf(instance)
if (index !== -1) {
list.splice(index, 1)
this.plugins[instance.type] = list
}
const updatedState = this.getState()
delete updatedState.plugins[instance.id]
this.setState(updatedState)
}
/**
* Uninstall all plugins and close down this Uppy instance.
*/
close () {
this.log(`Closing Uppy instance ${this.opts.id}: removing all files and uninstalling plugins`)
this.reset()
this._storeUnsubscribe()
this.iteratePlugins((plugin) => {
this.removePlugin(plugin)
})
}
/**
* Set info message in `state.info`, so that UI plugins like `Informer`
* can display the message.
*
* @param {string | object} message Message to be displayed by the informer
* @param {string} [type]
* @param {number} [duration]
*/
info (message, type = 'info', duration = 3000) {
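    // Usage sketch: `uppy.info('No internet', 'error', 0)`; a duration of 0
    // keeps the message visible until hideInfo() is called (see below).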
const isComplexMessage = typeof message === 'object'
this.setState({
info: {
isHidden: false,
type: type,
message: isComplexMessage ? message.message : message,
details: isComplexMessage ? message.details : null
}
})
this.emit('info-visible')
clearTimeout(this.infoTimeoutID)
if (duration === 0) {
this.infoTimeoutID = undefined
return
}
// hide the informer after `duration` milliseconds
this.infoTimeoutID = setTimeout(this.hideInfo, duration)
}
hideInfo () {
const newInfo = Object.assign({}, this.getState().info, {
isHidden: true
})
this.setState({
info: newInfo
})
this.emit('info-hidden')
}
/**
* Passes messages to a function, provided in `opts.logger`.
* If `opts.logger: Uppy.debugLogger` or `opts.debug: true`, logs to the browser console.
*
* @param {string|object} message to log
* @param {string} [type] optional `error` or `warning`
*/
log (message, type) {
const { logger } = this.opts
switch (type) {
case 'error': logger.error(message); break
case 'warning': logger.warn(message); break
default: logger.debug(message); break
}
}
/**
* Obsolete, event listeners are now added in the constructor.
*/
run () {
this.log('Calling run() is no longer necessary.', 'warning')
return this
}
/**
* Restore an upload by its ID.
*/
restore (uploadID) {
this.log(`Core: attempting to restore upload "${uploadID}"`)
if (!this.getState().currentUploads[uploadID]) {
this._removeUpload(uploadID)
return Promise.reject(new Error('Nonexistent upload'))
}
return this._runUpload(uploadID)
}
/**
* Create an upload for a bunch of files.
*
* @param {Array<string>} fileIDs File IDs to include in this upload.
* @returns {string} ID of this upload.
*/
_createUpload (fileIDs, opts = {}) {
const {
forceAllowNewUpload = false // uppy.retryAll sets this to true — when retrying we want to ignore `allowNewUpload: false`
} = opts
const { allowNewUpload, currentUploads } = this.getState()
if (!allowNewUpload && !forceAllowNewUpload) {
throw new Error('Cannot create a new upload: already uploading.')
}
const uploadID = cuid()
this.emit('upload', {
id: uploadID,
fileIDs: fileIDs
})
this.setState({
allowNewUpload: this.opts.allowMultipleUploads !== false,
currentUploads: {
...currentUploads,
[uploadID]: {
fileIDs: fileIDs,
step: 0,
result: {}
}
}
})
return uploadID
}
_getUpload (uploadID) {
const { currentUploads } = this.getState()
return currentUploads[uploadID]
}
/**
* Add data to an upload's result object.
*
* @param {string} uploadID The ID of the upload.
* @param {object} data Data properties to add to the result object.
*/
addResultData (uploadID, data) {
if (!this._getUpload(uploadID)) {
this.log(`Not setting result for an upload that has been removed: ${uploadID}`)
return
}
const currentUploads = this.getState().currentUploads
const currentUpload = Object.assign({}, currentUploads[uploadID], {
result: Object.assign({}, currentUploads[uploadID].result, data)
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: currentUpload
})
})
}
/**
* Remove an upload, eg. if it has been canceled or completed.
*
* @param {string} uploadID The ID of the upload.
*/
_removeUpload (uploadID) {
const currentUploads = { ...this.getState().currentUploads }
delete currentUploads[uploadID]
this.setState({
currentUploads: currentUploads
})
}
/**
* Run an upload. This picks up where it left off in case the upload is being restored.
*
* @private
*/
_runUpload (uploadID) {
const uploadData = this.getState().currentUploads[uploadID]
const restoreStep = uploadData.step
const steps = [
...this.preProcessors,
...this.uploaders,
...this.postProcessors
]
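    // e.g. with one preprocessor, one uploader and one postprocessor, the
    // steps run strictly in that order, each step awaited before the next.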
let lastStep = Promise.resolve()
steps.forEach((fn, step) => {
// Skip this step if we are restoring and have already completed this step before.
if (step < restoreStep) {
return
}
lastStep = lastStep.then(() => {
const { currentUploads } = this.getState()
const currentUpload = currentUploads[uploadID]
if (!currentUpload) {
return
}
const updatedUpload = Object.assign({}, currentUpload, {
step: step
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: updatedUpload
})
})
// TODO give this the `updatedUpload` object as its only parameter maybe?
// Otherwise when more metadata may be added to the upload this would keep getting more parameters
return fn(updatedUpload.fileIDs, uploadID)
}).then((result) => {
return null
})
})
// Not returning the `catch`ed promise, because we still want to return a rejected
// promise from this method if the upload failed.
lastStep.catch((err) => {
this.emit('error', err, uploadID)
this._removeUpload(uploadID)
})
return lastStep.then(() => {
// Set result data.
const { currentUploads } = this.getState()
const currentUpload = currentUploads[uploadID]
if (!currentUpload) {
return
}
const files = currentUpload.fileIDs
.map((fileID) => this.getFile(fileID))
const successful = files.filter((file) => !file.error)
const failed = files.filter((file) => file.error)
this.addResultData(uploadID, { successful, failed, uploadID })
}).then(() => {
// Emit completion events.
// This is in a separate function so that the `currentUploads` variable
// always refers to the latest state. In the handler right above it refers
// to an outdated object without the `.result` property.
const { currentUploads } = this.getState()
if (!currentUploads[uploadID]) {
return
}
const currentUpload = currentUploads[uploadID]
const result = currentUpload.result
this.emit('complete', result)
this._removeUpload(uploadID)
return result
}).then((result) => {
if (result == null) {
this.log(`Not setting result for an upload that has been removed: ${uploadID}`)
}
return result
})
}
/**
* Start an upload for all the files that are not currently being uploaded.
*
* @returns {Promise}
*/
upload () {
if (!this.plugins.uploader) {
this.log('No uploader type plugins are used', 'warning')
}
let files = this.getState().files
const onBeforeUploadResult = this.opts.onBeforeUpload(files)
if (onBeforeUploadResult === false) {
return Promise.reject(new Error('Not starting the upload because onBeforeUpload returned false'))
}
if (onBeforeUploadResult && typeof onBeforeUploadResult === 'object') {
files = onBeforeUploadResult
// Updating files in state, because uploader plugins receive file IDs,
// and then fetch the actual file object from state
this.setState({
files: files
})
}
return Promise.resolve()
.then(() => this._checkMinNumberOfFiles(files))
.catch((err) => {
this._showOrLogErrorAndThrow(err)
})
.then(() => {
const { currentUploads } = this.getState()
// get a list of files that are currently assigned to uploads
const currentlyUploadingFiles = Object.keys(currentUploads).reduce((prev, curr) => prev.concat(currentUploads[curr].fileIDs), [])
const waitingFileIDs = []
Object.keys(files).forEach((fileID) => {
const file = this.getFile(fileID)
        // if the file hasn't started uploading and hasn't already been assigned to an upload:
if ((!file.progress.uploadStarted) && (currentlyUploadingFiles.indexOf(fileID) === -1)) {
waitingFileIDs.push(file.id)
}
})
const uploadID = this._createUpload(waitingFileIDs)
return this._runUpload(uploadID)
})
.catch((err) => {
this._showOrLogErrorAndThrow(err, {
showInformer: false
})
})
}
}
module.exports = function (opts) {
return new Uppy(opts)
}
// Expose class constructor.
module.exports.Uppy = Uppy
module.exports.Plugin = Plugin
module.exports.debugLogger = debugLogger
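// A minimal usage sketch (hypothetical application code, not part of this
// module; the package name is an assumption): the default export is a
// factory, so `new` is optional, and `upload()` resolves with the
// `{ successful, failed, uploadID }` result assembled in `_runUpload`.
//
//   const uppy = require('@uppy/core')({ allowMultipleUploads: false })
//   uppy.upload().then((result) => console.log(result.successful, result.failed))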
| 1 | 13,604 | Mb change it to 'doesPassRestrictions()'? Doesn't sound enough like a boolean-returning method to me. | transloadit-uppy | js |
@@ -17,7 +17,7 @@ import (
// Lookup is the interface for time based feed lookup
type Lookup interface {
- At(ctx context.Context, at, after int64) (swarm.Chunk, error)
+ At(ctx context.Context, at, after int64) (chunk swarm.Chunk, currentIndex, nextIndex Index, err error)
}
 // Getter encapsulates a chunk getter and a feed and provides | 1 | // Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package feeds
import (
"context"
"encoding/binary"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/soc"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
// Lookup is the interface for time based feed lookup
type Lookup interface {
At(ctx context.Context, at, after int64) (swarm.Chunk, error)
}
// Getter encapsulates a chunk getter and a feed and provides
// non-concurrent lookup methods
type Getter struct {
getter storage.Getter
*Feed
}
// NewGetter constructs a feed Getter
func NewGetter(getter storage.Getter, feed *Feed) *Getter {
return &Getter{getter, feed}
}
// Latest looks up the latest update of the feed
// after is a unix time hint of the latest known update
func Latest(ctx context.Context, l Lookup, after int64) (swarm.Chunk, error) {
return l.At(ctx, time.Now().Unix(), after)
}
// Get creates an update of the underlying feed at the given epoch
// and looks it up in the chunk Getter based on its address
func (f *Getter) Get(ctx context.Context, i Index) (swarm.Chunk, error) {
addr, err := f.Feed.Update(i).Address()
if err != nil {
return nil, err
}
return f.getter.Get(ctx, storage.ModeGetRequest, addr)
}
// FromChunk parses out the timestamp and the payload
func FromChunk(ch swarm.Chunk) (uint64, []byte, error) {
s, err := soc.FromChunk(ch)
if err != nil {
return 0, nil, err
}
cac := s.Chunk
if len(cac.Data()) < 16 {
return 0, nil, fmt.Errorf("feed update payload too short")
}
payload := cac.Data()[16:]
at := binary.BigEndian.Uint64(cac.Data()[8:16])
return at, payload, nil
}
// UpdatedAt extracts the time at which the feed update contained in the chunk was made
func UpdatedAt(ch swarm.Chunk) (uint64, error) {
d := ch.Data()
if len(d) < 113 {
return 0, fmt.Errorf("too short: %d", len(d))
}
// a soc chunk with time information in the wrapped content addressed chunk
	// 0-32    id (index),
	// 32-97   signature,
	// 97-105  span of wrapped chunk,
	// 105-113 timestamp
return binary.BigEndian.Uint64(d[105:113]), nil
}
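// Consistency note (derived from the two functions above; the offsets are
// assumed from the reads they perform): for the same update chunk ch,
// UpdatedAt(ch) and the timestamp returned by FromChunk(ch) read the same
// 8 bytes -- UpdatedAt addresses them at d[105:113] of the raw soc chunk,
// FromChunk at cac.Data()[8:16] of the unwrapped content addressed chunk
// (97 prefix bytes of id and signature plus the 8 byte span).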
| 1 | 13,802 | TODO: in fact At when used with noncurrent time should not return a next index. only latest should.. | ethersphere-bee | go |
@@ -165,7 +165,11 @@ export function diff(
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment =
tmp != null && tmp.type == Fragment && tmp.key == null;
- newVNode._children = isTopLevelFragment ? tmp.props.children : tmp;
+ newVNode._children = isTopLevelFragment
+ ? tmp.props.children
+ : Array.isArray(tmp)
+ ? tmp
+ : [tmp];
if (c.getChildContext != null) {
context = assign(assign({}, context), c.getChildContext()); | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component } from '../component';
import { Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {Element | Text} oldDom The current attached DOM
* element any new dom elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} [isHydrating] Whether or not we are in hydration
*/
export function diff(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
) {
let tmp,
newType = newVNode.type;
// When passing through createElement it assigns the object
	// constructor as undefined. This is done to prevent JSON-injection.
if (newVNode.constructor !== undefined) return null;
if ((tmp = options._diff)) tmp(newVNode);
try {
outer: if (typeof newType === 'function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp
? provider
? provider.props.value
: tmp._defaultValue
: context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
} else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
} else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState == null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps != null) {
if (c._nextState == c.state) {
c._nextState = assign({}, c._nextState);
}
assign(
c._nextState,
newType.getDerivedStateFromProps(newProps, c._nextState)
);
}
oldProps = c.props;
oldState = c.state;
// Invoke pre-render lifecycle methods
if (isNew) {
if (
newType.getDerivedStateFromProps == null &&
c.componentWillMount != null
) {
c.componentWillMount();
}
if (c.componentDidMount != null) {
c._renderCallbacks.push(c.componentDidMount);
}
} else {
if (
newType.getDerivedStateFromProps == null &&
newProps !== oldProps &&
c.componentWillReceiveProps != null
) {
c.componentWillReceiveProps(newProps, cctx);
}
if (
!c._force &&
c.shouldComponentUpdate != null &&
c.shouldComponentUpdate(newProps, c._nextState, cctx) === false
) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) {
newVNode._children[tmp]._parent = newVNode;
}
}
break outer;
}
if (c.componentWillUpdate != null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
if (c.componentDidUpdate != null) {
c._renderCallbacks.push(() => {
c.componentDidUpdate(oldProps, oldState, snapshot);
});
}
}
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if ((tmp = options._render)) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment =
tmp != null && tmp.type == Fragment && tmp.key == null;
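			// (A keyless top-level Fragment is unwrapped here so its children
			// diff directly against the parent's previous children instead of
			// introducing an extra level in the tree.)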
newVNode._children = isTopLevelFragment ? tmp.props.children : tmp;
if (c.getChildContext != null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate != null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
);
c.base = newVNode._dom;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
c._force = false;
} else {
newVNode._dom = diffElementNodes(
oldVNode._dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
);
}
if ((tmp = options.diffed)) tmp(newVNode);
} catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
/**
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {import('../internal').VNode} root
*/
export function commitRoot(commitQueue, root) {
if (options._commit) options._commit(root, commitQueue);
commitQueue.some(c => {
try {
commitQueue = c._renderCallbacks;
c._renderCallbacks = [];
commitQueue.some(cb => {
cb.call(c);
});
} catch (e) {
options._catchError(e, c._vnode);
}
});
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(
dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type === 'svg' || isSvg;
if (dom == null && excessDomChildren != null) {
for (i = 0; i < excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (
child != null &&
(newVNode.type === null
? child.nodeType === 3
: child.localName === newVNode.type)
) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom == null) {
if (newVNode.type === null) {
return document.createTextNode(newProps);
}
dom = isSvg
? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type)
: document.createElement(
newVNode.type,
newProps.is && { is: newProps.is }
);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type === null) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
}
if (oldProps !== newProps && dom.data != newProps) {
dom.data = newProps;
}
} else if (newVNode !== oldVNode) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (oldProps === EMPTY_OBJ) {
oldProps = {};
for (let i = 0; i < dom.attributes.length; i++) {
oldProps[dom.attributes[i].name] = dom.attributes[i].value;
}
}
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not changed between re-render
if (!newHtml || !oldHtml || newHtml.__html != oldHtml.__html) {
dom.innerHTML = (newHtml && newHtml.__html) || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
newVNode._children = newVNode.props.children;
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(
dom,
newVNode,
oldVNode,
context,
newVNode.type === 'foreignObject' ? false : isSvg,
excessDomChildren,
commitQueue,
EMPTY_OBJ,
isHydrating
);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (
'value' in newProps &&
newProps.value !== undefined &&
newProps.value !== dom.value
) {
dom.value = newProps.value == null ? '' : newProps.value;
}
if (
'checked' in newProps &&
newProps.checked !== undefined &&
newProps.checked !== dom.checked
) {
dom.checked = newProps.checked;
}
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
export function applyRef(ref, value, vnode) {
try {
if (typeof ref == 'function') ref(value);
else ref.current = value;
} catch (e) {
options._catchError(e, vnode);
}
}
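// e.g. applyRef(node => (instance._el = node), dom, vnode) invokes the
// callback form, while applyRef(someRef, dom, vnode) assigns someRef.current
// (illustrative names; exceptions thrown by user refs are routed to
// options._catchError above).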
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if ((r = vnode.ref)) {
if (!r.current || r.current === vnode._dom) applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom) != null;
}
// Must be set to `undefined` to properly clean up `_nextDom`
// for which `null` is a valid value. See comment in `create-element.js`
vnode._dom = vnode._nextDom = undefined;
if ((r = vnode._component) != null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
} catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if ((r = vnode._children)) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom != null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
| 1 | 15,303 | There are scenario's where we skip an update with a placeholder, in this case tmp returns null with render, this is not an array so we `[null]` it | preactjs-preact | js |
@@ -33,8 +33,7 @@ import javaslang.match.annotation.Patterns;
// -- javaslang.collection
// List
- @Unapply static <T> Tuple2<T, List<T>> List(List.Cons<T> cons) { return Tuple.of(cons.head(), cons.tail()); }
- @Unapply static <T> Tuple0 List(List.Nil<T> nil) { return Tuple.empty(); }
+ @Unapply static <T> Tuple2<T, List<T>> List(List<T> cons) { return Tuple.of(cons.head(), cons.tail()); }
// Stream
@Unapply static <T> Tuple2<T, Stream<T>> Stream(Stream.Cons<T> cons) { return Tuple.of(cons.head(), cons.tail()); } | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang;
import javaslang.collection.List;
import javaslang.collection.Stream;
import javaslang.concurrent.Future;
import javaslang.control.Either;
import javaslang.control.Option;
import javaslang.control.Try;
import javaslang.control.Validation;
import javaslang.match.annotation.Unapply;
import javaslang.match.annotation.Patterns;
@Patterns class $ {
// -- javaslang
// Tuple0-N
@Unapply static Tuple0 Tuple0(Tuple0 tuple0) { return tuple0; }
@Unapply static <T1> Tuple1<T1> Tuple1(Tuple1<T1> tuple1) { return tuple1; }
@Unapply static <T1, T2> Tuple2<T1, T2> Tuple2(Tuple2<T1, T2> tuple2) { return tuple2; }
@Unapply static <T1, T2, T3> Tuple3<T1, T2, T3> Tuple3(Tuple3<T1, T2, T3> tuple3) { return tuple3; }
@Unapply static <T1, T2, T3, T4> Tuple4<T1, T2, T3, T4> Tuple4(Tuple4<T1, T2, T3, T4> tuple4) { return tuple4; }
@Unapply static <T1, T2, T3, T4, T5> Tuple5<T1, T2, T3, T4, T5> Tuple5(Tuple5<T1, T2, T3, T4, T5> tuple5) { return tuple5; }
@Unapply static <T1, T2, T3, T4, T5, T6> Tuple6<T1, T2, T3, T4, T5, T6> Tuple6(Tuple6<T1, T2, T3, T4, T5, T6> tuple6) { return tuple6; }
@Unapply static <T1, T2, T3, T4, T5, T6, T7> Tuple7<T1, T2, T3, T4, T5, T6, T7> Tuple7(Tuple7<T1, T2, T3, T4, T5, T6, T7> tuple7) { return tuple7; }
@Unapply static <T1, T2, T3, T4, T5, T6, T7, T8> Tuple8<T1, T2, T3, T4, T5, T6, T7, T8> Tuple8(Tuple8<T1, T2, T3, T4, T5, T6, T7, T8> tuple8) { return tuple8; }
// -- javaslang.collection
// List
@Unapply static <T> Tuple2<T, List<T>> List(List.Cons<T> cons) { return Tuple.of(cons.head(), cons.tail()); }
@Unapply static <T> Tuple0 List(List.Nil<T> nil) { return Tuple.empty(); }
// Stream
@Unapply static <T> Tuple2<T, Stream<T>> Stream(Stream.Cons<T> cons) { return Tuple.of(cons.head(), cons.tail()); }
@Unapply static <T> Tuple0 Stream(Stream.Empty<T> empty) { return Tuple.empty(); }
// TODO: Tree
// -- javaslang.concurrent
@Unapply static <T> Tuple1<Option<Try<T>>> Future(Future<T> future) { return Tuple.of(future.getValue()); }
// -- javaslang.control
// Either
@Unapply static <L, R> Tuple1<R> Right(Either.Right<L, R> right) { return Tuple.of(right.get()); }
@Unapply static <L, R> Tuple1<L> Left(Either.Left<L, R> left) { return Tuple.of(left.getLeft()); }
// Option
@Unapply static <T> Tuple1<T> Some(Option.Some<T> some) { return Tuple.of(some.get()); }
@Unapply static <T> Tuple0 None(Option.None<T> none) { return Tuple.empty(); }
// Try
@Unapply static <T> Tuple1<T> Success(Try.Success<T> success) { return Tuple.of(success.get()); }
@Unapply static <T> Tuple1<Throwable> Failure(Try.Failure<T> failure) { return Tuple.of(failure.getCause()); }
// Validation
@Unapply static <E, T> Tuple1<T> Valid(Validation.Valid<E, T> valid) { return Tuple.of(valid.get()); }
@Unapply static <E, T> Tuple1<E> Invalid(Validation.Invalid<E, T> invalid) { return Tuple.of(invalid.getError()); }
}
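// Usage sketch (assumes the @Unapply annotation processor has generated the
// corresponding javaslang.Patterns class from the methods above; names and
// values are illustrative):
//
//   String msg = Match(option).of(
//       Case($Some($()), value -> "defined: " + value),
//       Case($None(), () -> "empty"));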
| 1 | 8,181 | @danieldietrich, this wasn't tested so I presumed it wasn't working before either :p | vavr-io-vavr | java |
@@ -22,7 +22,7 @@
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use TYPO3\CMS\Core\Utility\GeneralUtility;
-
+use ApacheSolrForTypo3\Solr\Facet\Facet;
/**
* Query group facet renderer.
* | 1 | <?php
/***************************************************************
* Copyright notice
*
* (c) 2012-2015 Ingo Renner <ingo@typo3.org>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use TYPO3\CMS\Core\Utility\GeneralUtility;
/**
* Query group facet renderer.
*
* @author Ingo Renner <ingo@typo3.org>
* @package TYPO3
* @subpackage solr
*/
class Tx_Solr_Facet_QueryGroupFacetRenderer extends Tx_Solr_Facet_SimpleFacetRenderer {
/**
* Provides the internal type of facets the renderer handles.
* The type is one of field, range, or query.
*
* @return string Facet internal type
*/
public static function getFacetInternalType() {
return Tx_Solr_Facet_Facet::TYPE_QUERY;
}
/**
* Encodes the facet option values from raw Lucene queries to values that
* can be easily used in rendering instructions and URL generation.
*
* (non-PHPdoc)
* @see Tx_Solr_Facet_AbstractFacetRenderer::getFacetOptions()
*/
public function getFacetOptions() {
$facetOptions = array();
$facetOptionsRaw = parent::getFacetOptions();
$filterEncoder = GeneralUtility::makeInstance('Tx_Solr_Query_FilterEncoder_QueryGroup');
foreach ($facetOptionsRaw as $facetOption => $numberOfResults) {
$facetOption = $filterEncoder->encodeFilter($facetOption, $this->facetConfiguration);
$facetOptions[$facetOption] = $numberOfResults;
}
return $facetOptions;
}
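	// Illustrative example (assumed TypoScript shape): with a facet configured
	// as queryGroup { week { query = [NOW/DAY-7DAYS TO *] } }, the raw Lucene
	// query option returned by Solr is encoded back to the key "week" before
	// rendering and URL generation.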
}
| 1 | 5,531 | Please leave two empty lines after the `use` statement | TYPO3-Solr-ext-solr | php |
@@ -27,6 +27,7 @@ import (
"bytes"
"fmt"
"io/ioutil"
+ "k8s.io/kubernetes/pkg/kubelet/cm"
"os"
"path"
"path/filepath" | 1 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@CHANGELOG
KubeEdge Authors: To create mini-kubelet for edge deployment scenario,
This file is derived from K8S Kubelet code with reduced set of methods
Changes done are
1. Package edged got some functions from "k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go"
and made some variant
*/
package edged
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/kubeedge/beehive/pkg/common/log"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/format"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/kubernetes/pkg/volume/validation"
)
const (
etcHostsPath = "/etc/hosts"
)
// GetActivePods returns non-terminal pods
func (e *edged) GetActivePods() []*v1.Pod {
allPods := e.podManager.GetPods()
activePods := e.filterOutTerminatedPods(allPods)
return activePods
}
// filterOutTerminatedPods returns the given pods which the status manager
// does not consider failed or succeeded.
func (e *edged) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
var filteredPods []*v1.Pod
for _, p := range pods {
if e.podIsTerminated(p) {
continue
}
filteredPods = append(filteredPods, p)
}
return filteredPods
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// Cap hostname at 63 chars (the specification allows 64 bytes, i.e. 63 chars plus the null terminator).
const hostnameMaxLen = 63
if len(hostname) <= hostnameMaxLen {
return hostname, nil
}
truncated := hostname[:hostnameMaxLen]
log.LOGGER.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
// hostname should not end with '-' or '.'
truncated = strings.TrimRight(truncated, "-.")
if len(truncated) == 0 {
// This should never happen.
return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
}
return truncated, nil
}
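// e.g. a 70 character hostname is cut to its first 63 characters, and any
// trailing '-' or '.' produced by the cut is trimmed so the result remains
// a valid DNS label (derived from the logic above).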
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
func (e *edged) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
// TODO(vmarmol): Handle better.
clusterDomain := "cluster"
hostname := pod.Name
if len(pod.Spec.Hostname) > 0 {
hostname = pod.Spec.Hostname
}
hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
hostDomain := ""
if len(pod.Spec.Subdomain) > 0 {
hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
}
return hostname, hostDomain, nil
}
// Get a list of pods that have data directories.
func (e *edged) listPodsFromDisk() ([]types.UID, error) {
podInfos, err := ioutil.ReadDir(e.getPodsDir())
if err != nil {
return nil, err
}
pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *v1.Pod) bool {
if pod.Spec.SecurityContext == nil {
return false
}
return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
for _, v := range pod.Spec.Volumes {
if v.HostPath != nil {
return true
}
}
return false
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
for _, cap := range c.SecurityContext.Capabilities.Add {
if cap == "MKNOD" || cap == "SYS_TIME" || cap == "SYS_MODULE" {
return true
}
}
}
}
return false
}
// hasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func hasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {
return true
}
}
return false
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host pid, ipc, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: if a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (e *edged) enableHostUserNamespace(pod *v1.Pod) bool {
if hasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) {
return true
}
return false
}
// podIsTerminated returns true if pod is in the terminated state ("Failed" or "Succeeded").
func (e *edged) podIsTerminated(pod *v1.Pod) bool {
// Check the cached pod status which was set after the last sync.
status, ok := e.statusManager.GetPodStatus(pod.UID)
if !ok {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
status = pod.Status
}
return status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(status.ContainerStatuses))
}
// makePodDataDirs creates the dirs for the pod datas.
func (e *edged) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(e.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(e.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(e.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
func (e *edged) makePodDir() error {
if err := os.MkdirAll(e.getPodsDir(), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// notRunning returns true if every status is terminated or waiting, or the status list
// is empty.
func notRunning(statuses []v1.ContainerStatus) bool {
for _, status := range statuses {
if status.State.Terminated == nil && status.State.Waiting == nil {
return false
}
}
return true
}
func (e *edged) GenerateContainerOptions(pod *v1.Pod) (*kubecontainer.RunContainerOptions, error) {
opts := kubecontainer.RunContainerOptions{}
hostname, hostDomainName, err := e.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podName := util.GetUniquePodName(pod)
volumes := e.volumeManager.GetMountedVolumesForPod(podName)
for _, container := range pod.Spec.Containers {
mounts, err := makeMounts(pod, e.getPodDir(pod.UID), &container, hostname, hostDomainName, pod.Status.PodIP, volumes)
if err != nil {
return nil, err
}
opts.Mounts = append(opts.Mounts, mounts...)
}
return &opts, nil
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
// Kubernetes only mounts on /etc/hosts if:
// - container is not an infrastructure (pause) container
// - container is not already mounting on /etc/hosts
// - OS is not Windows
// Kubernetes will not mount /etc/hosts if:
// - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set.
mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows"
log.LOGGER.Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
// do not mount /etc/hosts if container is already mounting on the path
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok || vol.Mounter == nil {
log.LOGGER.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount)
return nil, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
}
relabelVolume := false
// If the volume supports SELinux and it has not been
// relabeled already and it is not a read-only volume,
// relabel it and mark it as labeled
if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := util.GetPath(vol.Mounter)
if err != nil {
return nil, err
}
if mount.SubPath != "" {
if filepath.IsAbs(mount.SubPath) {
return nil, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath)
}
err = validation.ValidatePathNoBacksteps(mount.SubPath)
if err != nil {
return nil, fmt.Errorf("unable to provision SubPath `%s`: %v", mount.SubPath, err)
}
fileinfo, err := os.Lstat(hostPath)
if err != nil {
return nil, err
}
perm := fileinfo.Mode()
hostPath = filepath.Join(hostPath, mount.SubPath)
if subPathExists, err := utilfile.FileOrSymlinkExists(hostPath); err != nil {
log.LOGGER.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
} else if !subPathExists {
// Create the sub path now because if it's auto-created later when referenced, it may have an
// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
// later auto-create it with the incorrect mode 0750
if err := os.MkdirAll(hostPath, perm); err != nil {
log.LOGGER.Errorf("failed to mkdir:%s", hostPath)
return nil, err
}
// chmod the sub path because umask may have prevented us from making the sub path with the same
// permissions as the mounter path
if err := os.Chmod(hostPath, perm); err != nil {
return nil, err
}
}
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
containerPath := mount.MountPath
if runtime.GOOS == "windows" {
if (strings.HasPrefix(hostPath, "/") || strings.HasPrefix(hostPath, "\\")) && !strings.Contains(hostPath, ":") {
hostPath = "c:" + hostPath
}
}
if !filepath.IsAbs(containerPath) {
containerPath = makeAbsolutePath(runtime.GOOS, containerPath)
}
		// Extend the path according to the extend type of the mount volume, by appending the pod metadata to the path.
		// TODO: this logic was added by Huawei; figure out what it is for and remove it
// extendVolumePath := volumehelper.GetExtendVolumePath(pod, container, mount.ExtendPathMode)
// if extendVolumePath != "" {
// hostPath = filepath.Join(hostPath, extendVolumePath)
// }
propagation, err := translateMountPropagation(mount.MountPropagation)
if err != nil {
return nil, err
}
log.LOGGER.Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
SELinuxRelabel: relabelVolume,
Propagation: propagation,
})
}
if mountEtcHostsFile {
hostAliases := pod.Spec.HostAliases
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
if err != nil {
return nil, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, nil
}
func makeAbsolutePath(goos, path string) string {
if goos != "windows" {
return "/" + path
}
// These are all for windows
// If there is a colon, give up.
if strings.Contains(path, ":") {
return path
}
// If there is a slash, but no drive, add 'c:'
if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
return "c:" + path
}
// Otherwise, add 'c:\'
return "c:\\" + path
}
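// Derived examples:
//
//	makeAbsolutePath("linux", "data")    == "/data"
//	makeAbsolutePath("windows", "/data") == "c:/data"
//	makeAbsolutePath("windows", "data")  == "c:\\data"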
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
// mount propagation is disabled, use private as in the old versions
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
}
switch {
case mountMode == nil:
// HostToContainer is the default
return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
case *mountMode == v1.MountPropagationHostToContainer:
return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
case *mountMode == v1.MountPropagationBidirectional:
return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
default:
return 0, fmt.Errorf("invalid MountPropagation mode: %q", mountMode)
}
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
return nil, err
}
return &kubecontainer.Mount{
Name: "k8s-managed-etc-hosts",
ContainerPath: etcHostsPath,
HostPath: hostsFilePath,
ReadOnly: false,
SELinuxRelabel: true,
}, nil
}
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
var hostsFileContent []byte
var err error
if useHostNetwork {
// if Pod is using host network, read hosts file from the node's filesystem.
// `etcHostsPath` references the location of the hosts file on the node.
// `/etc/hosts` for *nix systems.
hostsFileContent, err = nodeHostsFileContent(etcHostsPath, hostAliases)
if err != nil {
return err
}
} else {
// if Pod is not using host network, create a managed hosts file with Pod IP and other information.
hostsFileContent = managedHostsFileContent(hostIP, hostName, hostDomainName, hostAliases)
}
return ioutil.WriteFile(fileName, hostsFileContent, 0644)
}
// nodeHostsFileContent reads the content of node's hosts file.
func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]byte, error) {
hostsFileContent, err := ioutil.ReadFile(hostsFilePath)
if err != nil {
return nil, err
}
hostsFileContent = append(hostsFileContent, hostsEntriesFromHostAliases(hostAliases)...)
return hostsFileContent, nil
}
func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
if len(hostAliases) == 0 {
return []byte{}
}
var buffer bytes.Buffer
buffer.WriteString("\n")
buffer.WriteString("# Entries added by HostAliases.\n")
// write each IP/hostname pair as an entry into hosts file
for _, hostAlias := range hostAliases {
for _, hostname := range hostAlias.Hostnames {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, hostname))
}
}
return buffer.Bytes()
}
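// e.g. v1.HostAlias{IP: "10.0.0.5", Hostnames: []string{"foo.local", "bar.local"}}
// (illustrative values) appends the "# Entries added by HostAliases." header
// followed by one "10.0.0.5\tfoo.local" style line per hostname.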
// managedHostsFileContent generates the content of the managed etc hosts based on Pod IP and other
// information.
func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
var buffer bytes.Buffer
buffer.WriteString("# Kubernetes-managed hosts file.\n")
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
buffer.WriteString("fe00::0\tip6-localnet\n")
buffer.WriteString("fe00::0\tip6-mcastprefix\n")
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
} else {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
hostsFileContent := buffer.Bytes()
hostsFileContent = append(hostsFileContent, hostsEntriesFromHostAliases(hostAliases)...)
return hostsFileContent
}
// IsPodTerminated returns true if the pod with the provided UID is in a terminated state ("Failed" or "Succeeded")
// or if the pod has been deleted or removed
func (e *edged) IsPodTerminated(uid types.UID) bool {
pod, podFound := e.podManager.GetPodByUID(uid)
if !podFound {
return true
}
return e.podIsTerminated(pod)
}
func podIsEvicted(podStatus v1.PodStatus) bool {
return podStatus.Phase == v1.PodFailed && podStatus.Reason == "Evicted"
}
// IsPodDeleted returns true if the pod is deleted. For the pod to be deleted, either:
// 1. The pod object is deleted
// 2. The pod's status is evicted
// 3. The pod's deletion timestamp is set, and containers are not running
func (e *edged) IsPodDeleted(uid types.UID) bool {
pod, podFound := e.podManager.GetPodByUID(uid)
if !podFound {
return true
}
status, statusFound := e.statusManager.GetPodStatus(pod.UID)
if !statusFound {
status = pod.Status
}
return podIsEvicted(status) || (pod.DeletionTimestamp != nil && notRunning(status.ContainerStatuses))
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (e *edged) removeOrphanedPodStatuses(pods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
}
e.statusManager.RemoveOrphanedStatuses(podUIDs)
}
// GetPodCgroupParent gets pod cgroup parent from container manager.
func (e *edged) GetPodCgroupParent(pod *v1.Pod) string {
/*pcm := e.containerManager.NewPodContainerManager()
_, cgroupParent := pcm.GetPodContainerName(pod)
return cgroupParent*/
return "systemd"
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (e *edged) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) {
/*opts, err := e.GenerateContainerOptions(pod)
if err != nil {
return nil, nil, err
}*/
opts := kubecontainer.RunContainerOptions{}
hostname, hostDomainName, err := e.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, nil, err
}
opts.Hostname = hostname
podName := util.GetUniquePodName(pod)
volumes := e.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = kubecontainer.MakePortMappings(container)
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
blkutil := volumepathhandler.NewBlockVolumePathHandler()
blkVolumes, err := e.makeBlockVolumes(pod, container, volumes, blkutil)
if err != nil {
return nil, nil, err
}
opts.Devices = append(opts.Devices, blkVolumes...)
}
/*envs, err := e.makeEnvironmentVariables(pod, container, podIP)
if err != nil {
return nil, nil, err
}
opts.Envs = append(opts.Envs, envs...)*/
mounts, err := makeMounts(pod, e.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
if err != nil {
return nil, nil, err
}
opts.Mounts = append(opts.Mounts, mounts...)
// Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and
// Docker for Windows has a bug where only directories can be mounted
if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
p := e.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %q: %v", p, err)
} else {
opts.PodContainerDir = p
}
}
return &opts, nil, nil
}
// GetPodDNS returns DNS settings for the pod.
// This function is defined in kubecontainer.RuntimeHelper interface so we
// have to implement it.
func (e *edged) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
dnsConfig := &runtimeapi.DNSConfig{Servers: []string{""}}
return dnsConfig, nil
}
// Make the environment variables for a pod in the given namespace.
/*func (e *edged) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
if pod.Spec.EnableServiceLinks == nil {
return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars")
}
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
// by kubecontainer.HashContainer(...). That way, we can still determine whether an
// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
// To avoid this users can: (1) wait between starting a service and starting; or (2) detect
// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
// and keep trying to resolve the DNS name of the service (recommended).
serviceEnv, err := e.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
if err != nil {
return result, err
}
var (
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
tmpEnv = make(map[string]string)
)
// Env will override EnvFrom variables.
// Process EnvFrom first then allow Env to replace existing values.
for _, envFrom := range container.EnvFrom {
switch {
case envFrom.ConfigMapRef != nil:
cm := envFrom.ConfigMapRef
name := cm.Name
configMap, ok := configMaps[name]
if !ok {
if e.kubeClient == nil {
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
optional := cm.Optional != nil && *cm.Optional
configMap, err = e.configMapManager.GetConfigMap(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
configMaps[name] = configMap
}
invalidKeys := []string{}
for k, v := range configMap.Data {
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
invalidKeys = append(invalidKeys, k)
continue
}
tmpEnv[k] = v
}
if len(invalidKeys) > 0 {
sort.Strings(invalidKeys)
e.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom configMap %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
}
case envFrom.SecretRef != nil:
s := envFrom.SecretRef
name := s.Name
secret, ok := secrets[name]
if !ok {
if e.kubeClient == nil {
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
optional := s.Optional != nil && *s.Optional
secret, err = e.secretManager.GetSecret(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
secrets[name] = secret
}
invalidKeys := []string{}
for k, v := range secret.Data {
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
invalidKeys = append(invalidKeys, k)
continue
}
tmpEnv[k] = string(v)
}
if len(invalidKeys) > 0 {
sort.Strings(invalidKeys)
e.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom secret %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
}
}
}
// Determine the final values of variables:
//
// 1. Determine the final value of each variable:
// a. If the variable's Value is set, expand the `$(var)` references to other
// variables in the .Value field; the sources of variables are the declared
// variables of the container and the service environment variables
// b. If a source is defined for an environment variable, resolve the source
// 2. Create the container's environment in the order variables are declared
// 3. Add remaining service environment vars
var (
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
for _, envVar := range container.Env {
runtimeVal := envVar.Value
if runtimeVal != "" {
// Step 1a: expand variable references
runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
} else if envVar.ValueFrom != nil {
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = e.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
if err != nil {
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
defaultedPod, defaultedContainer, err := e.defaultPodLimitsForDownwardAPI(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
case envVar.ValueFrom.ConfigMapKeyRef != nil:
cm := envVar.ValueFrom.ConfigMapKeyRef
name := cm.Name
key := cm.Key
optional := cm.Optional != nil && *cm.Optional
configMap, ok := configMaps[name]
if !ok {
if e.kubeClient == nil {
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
configMap, err = e.configMapManager.GetConfigMap(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
configMaps[name] = configMap
}
runtimeVal, ok = configMap.Data[key]
if !ok {
if optional {
continue
}
return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
}
case envVar.ValueFrom.SecretKeyRef != nil:
s := envVar.ValueFrom.SecretKeyRef
name := s.Name
key := s.Key
optional := s.Optional != nil && *s.Optional
secret, ok := secrets[name]
if !ok {
if e.kubeClient == nil {
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
secret, err = e.secretManager.GetSecret(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
secrets[name] = secret
}
runtimeValBytes, ok := secret.Data[key]
if !ok {
if optional {
continue
}
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
}
runtimeVal = string(runtimeValBytes)
}
}
// Accesses apiserver+Pods.
// So, the master may set service env vars, or kubelet may. In case both are doing
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
// env vars.
// TODO: remove this next line once all platforms use apiserver+Pods.
delete(serviceEnv, envVar.Name)
tmpEnv[envVar.Name] = runtimeVal
}
// Append the env vars
for k, v := range tmpEnv {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
// Append remaining service env vars.
for k, v := range serviceEnv {
// Accesses apiserver+Pods.
// So, the master may set service env vars, or kubelet may. In case both are doing
// it, we skip the key from the kubelet-generated ones so we don't have duplicate
// env vars.
// TODO: remove this next line once all platforms use apiserver+Pods.
if _, present := tmpEnv[k]; !present {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
}
return result, nil
}*/
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
func (e *edged) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
var devices []kubecontainer.DeviceInfo
for _, device := range container.VolumeDevices {
// check path is absolute
if !filepath.IsAbs(device.DevicePath) {
return nil, fmt.Errorf("error DevicePath `%s` must be an absolute path", device.DevicePath)
}
vol, ok := podVolumes[device.Name]
if !ok || vol.BlockVolumeMapper == nil {
glog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device)
return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name)
}
// Get a symbolic link associated to a block device under pod device path
dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath()
symlinkPath := path.Join(dirPath, volName)
if islinkExist, checkErr := blkutil.IsSymlinkExist(symlinkPath); checkErr != nil {
return nil, checkErr
} else if islinkExist {
// Check readOnly in PVCVolumeSource and set read only permission if it's true.
permission := "mrw"
if vol.ReadOnly {
permission = "r"
}
glog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath)
devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission})
}
}
return devices, nil
}
| 1 | 14,565 | Please move this import in the 3rd group which is for external dependencies | kubeedge-kubeedge | go |
@@ -63,7 +63,7 @@ def _path_hash(path, transform, kwargs):
return digest_string(srcinfo)
def _is_internal_node(node):
- is_leaf = not node or node.get(RESERVED['file'])
+ is_leaf = not node or isinstance(node.get(RESERVED['file']), str)
return not is_leaf
def _pythonize_name(name): | 1 | """
parse build file, serialize package
"""
from collections import defaultdict, Iterable
import glob
import importlib
import json
import os
import re
from types import ModuleType
import numpy as np
import pandas as pd
from pandas import DataFrame as df
from six import iteritems, string_types
import yaml
from tqdm import tqdm
from .compat import pathlib
from .const import DEFAULT_BUILDFILE, PACKAGE_DIR_NAME, PARSERS, RESERVED
from .core import PackageFormat
from .hashing import digest_file, digest_string
from .package import Package, ParquetLib
from .store import PackageStore, StoreException
from .util import FileWithReadProgress, is_nodename, to_nodename
from . import check_functions as qc # pylint:disable=W0611
class BuildException(Exception):
"""
Build-time exception class
"""
pass
def _have_pyspark():
"""
Check if we're running Pyspark
"""
if _have_pyspark.flag is None:
try:
if Package.get_parquet_lib() is ParquetLib.SPARK:
import pyspark # pylint:disable=W0612
_have_pyspark.flag = True
else:
_have_pyspark.flag = False
except ImportError:
_have_pyspark.flag = False
return _have_pyspark.flag
_have_pyspark.flag = None
def _path_hash(path, transform, kwargs):
"""
Generate a hash of source file path + transform + args
"""
sortedargs = ["%s:%r:%s" % (key, value, type(value))
for key, value in sorted(iteritems(kwargs))]
srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path),
transform=transform,
kwargs=",".join(sortedargs))
return digest_string(srcinfo)
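# e.g. _path_hash('data/a.csv', 'csv', {'sep': ','}) digests the string
# "<abs path>:csv:{sep:',':<class 'str'>}" (illustrative, Python 3 repr), so
# the cache key changes whenever the source path, transform, or any kwarg
# changes.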
def _is_internal_node(node):
is_leaf = not node or node.get(RESERVED['file'])
return not is_leaf
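# e.g. _is_internal_node({'file': 'data.csv'}) is False (a leaf), while
# _is_internal_node({'child': {'file': 'data.csv'}}) is True (a group);
# empty nodes also count as leaves.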
def _pythonize_name(name):
safename = re.sub('[^A-Za-z0-9]+', '_', name).strip('_')
if safename and safename[0].isdigit():
safename = "n%s" % safename
if not is_nodename(safename):
raise BuildException("Unable to determine a Python-legal name for %r" % name)
return safename
def _run_checks(dataframe, checks, checks_contents, nodename, rel_path, target, env='default'):
_ = env # TODO: env support for checks
print("Running data integrity checks...")
checks_list = re.split(r'[,\s]+', checks.strip())
unknown_checks = set(checks_list) - set(checks_contents)
if unknown_checks:
raise BuildException("Unknown check(s) '%s' for %s @ %s" %
(", ".join(list(unknown_checks)), rel_path, target))
for check in checks_list:
res = exec_yaml_python(checks_contents[check], dataframe, nodename, rel_path, target)
if not res and res is not None:
raise BuildException("Data check failed: %s on %s @ %s" % (
check, rel_path, target))
def _gen_glob_data(dir, pattern, child_table):
"""Generates node data by globbing a directory for a pattern"""
dir = pathlib.Path(dir)
matched = False
used_names = set() # Used by to_nodename to prevent duplicate names
# sorted so that renames (if any) are consistently ordered
for filepath in sorted(dir.glob(pattern)):
if filepath.is_dir():
continue
else:
matched = True
# create node info
node_table = {} if child_table is None else child_table.copy()
filepath = filepath.relative_to(dir)
node_table[RESERVED['file']] = str(filepath)
node_name = to_nodename(filepath.stem, invalid=used_names)
used_names.add(node_name)
print("Matched with {!r}: {!r}".format(pattern, str(filepath)))
yield node_name, node_table
if not matched:
print("Warning: {!r} matched no files.".format(pattern))
return
def _build_node(build_dir, package, name, node, fmt, target='pandas', checks_contents=None,
dry_run=False, env='default', ancestor_args={}):
"""
Parameters
----------
ancestor_args : dict
any transform inherited from an ancestor
        plus any inherited handler kwargs.
        Users can thus define kwargs that affect entire subtrees
        (e.g. transform: csv for 500 .txt files)
        and override ancestor or peer values.
Child transform or kwargs override ancestor k:v pairs.
"""
if _is_internal_node(node):
# NOTE: YAML parsing does not guarantee key order
# fetch local transform and kwargs values; we do it using ifs
# to prevent `key: None` from polluting the update
local_args = {}
if node.get(RESERVED['transform']):
local_args[RESERVED['transform']] = node[RESERVED['transform']]
if node.get(RESERVED['kwargs']):
local_args[RESERVED['kwargs']] = node[RESERVED['kwargs']]
group_args = ancestor_args.copy()
group_args.update(local_args)
        # if it's not a reserved word, it's a group we can descend into
groups = {k: v for k, v in iteritems(node) if k not in RESERVED}
for child_name, child_table in groups.items():
if glob.has_magic(child_name):
# child_name is a glob string, use it to generate multiple child nodes
for gchild_name, gchild_table in _gen_glob_data(build_dir, child_name, child_table):
full_gchild_name = name + '/' + gchild_name if name else gchild_name
_build_node(build_dir, package, full_gchild_name, gchild_table, fmt,
checks_contents=checks_contents, dry_run=dry_run, env=env, ancestor_args=group_args)
else:
if not isinstance(child_name, str) or not is_nodename(child_name):
raise StoreException("Invalid node name: %r" % child_name)
full_child_name = name + '/' + child_name if name else child_name
_build_node(build_dir, package, full_child_name, child_table, fmt,
checks_contents=checks_contents, dry_run=dry_run, env=env, ancestor_args=group_args)
else: # leaf node
# prevent overwriting existing node names
if name in package:
raise BuildException("Naming conflict: {!r} added to package more than once".format(name))
# handle group leaf nodes (empty groups)
if not node:
if not dry_run:
package.save_group(name)
return
        # handle remaining leaf node types
rel_path = node.get(RESERVED['file'])
if not rel_path:
raise BuildException("Leaf nodes must define a %s key" % RESERVED['file'])
path = os.path.join(build_dir, rel_path)
# get either the locally defined transform or inherit from an ancestor
transform = node.get(RESERVED['transform']) or ancestor_args.get(RESERVED['transform'])
ID = 'id' # pylint:disable=C0103
if transform:
transform = transform.lower()
if (transform not in PARSERS) and (transform != ID):
raise BuildException("Unknown transform '%s' for %s @ %s" %
(transform, rel_path, target))
else: # guess transform if user doesn't provide one
_, ext = splitext_no_dot(rel_path)
transform = ext
if transform not in PARSERS:
transform = ID
print("Inferring 'transform: %s' for %s" % (transform, rel_path))
# TODO: parse/check environments:
# environments = node.get(RESERVED['environments'])
checks = node.get(RESERVED['checks'])
if transform == ID:
            # TODO: move this to a separate function
if checks:
with open(path, 'r') as fd:
data = fd.read()
_run_checks(data, checks, checks_contents, name, rel_path, target, env=env)
if not dry_run:
print("Registering %s..." % path)
package.save_file(path, name, rel_path)
else:
# copy so we don't modify shared ancestor_args
handler_args = dict(ancestor_args.get(RESERVED['kwargs'], {}))
# local kwargs win the update
handler_args.update(node.get(RESERVED['kwargs'], {}))
# Check Cache
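            # The cache key is _path_hash(path, transform, handler_args); the
            # entry pairs the source file's digest with the object hashes of a
            # previous identical build, e.g.:
            #   {"source_hash": "<digest>", "obj_hashes": ["<obj1>", "<obj2>"]}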
store = PackageStore()
path_hash = _path_hash(path, transform, handler_args)
source_hash = digest_file(path)
cachedobjs = []
if os.path.exists(store.cache_path(path_hash)):
with open(store.cache_path(path_hash), 'r') as entry:
cache_entry = json.load(entry)
if cache_entry['source_hash'] == source_hash:
cachedobjs = cache_entry['obj_hashes']
assert isinstance(cachedobjs, list)
# Check to see that cached objects actually exist in the store
# TODO: check for changes in checks else use cache
# below is a heavy-handed fix but it's OK for check builds to be slow
if not checks and cachedobjs and all(os.path.exists(store.object_path(obj)) for obj in cachedobjs):
# Use existing objects instead of rebuilding
package.save_cached_df(cachedobjs, name, rel_path, transform, target, fmt)
else:
# read source file into DataFrame
print("Serializing %s..." % path)
if _have_pyspark():
dataframe = _file_to_spark_data_frame(transform, path, target, handler_args)
else:
dataframe = _file_to_data_frame(transform, path, target, handler_args)
if checks:
# TODO: test that design works for internal nodes... e.g. iterating
# over the children and getting/checking the data, err msgs, etc.
_run_checks(dataframe, checks, checks_contents, name, rel_path, target, env=env)
# serialize DataFrame to file(s)
if not dry_run:
print("Saving as binary dataframe...")
obj_hashes = package.save_df(dataframe, name, rel_path, transform, target, fmt)
# Add to cache
cache_entry = dict(
source_hash=source_hash,
obj_hashes=obj_hashes
)
with open(store.cache_path(path_hash), 'w') as entry:
json.dump(cache_entry, entry)
def _remove_keywords(d):
"""
    Copy the dict, filtering out reserved keywords.
Parameters
----------
d : dict
"""
return { k:v for k, v in iteritems(d) if k not in RESERVED }
def _file_to_spark_data_frame(ext, path, target, handler_args):
from pyspark import sql as sparksql
_ = target # TODO: why is this unused?
ext = ext.lower() # ensure that case doesn't matter
logic = PARSERS.get(ext)
kwargs = dict(logic['kwargs'])
kwargs.update(handler_args)
spark = sparksql.SparkSession.builder.getOrCreate()
dataframe = None
reader = None
# FIXME: Add json support?
if logic['attr'] == "read_csv":
sep = kwargs.get('sep')
reader = spark.read.format("csv").option("header", "true")
if sep:
reader = reader.option("delimiter", sep)
dataframe = reader.load(path)
for col in dataframe.columns:
pcol = _pythonize_name(col)
if col != pcol:
dataframe = dataframe.withColumnRenamed(col, pcol)
else:
dataframe = _file_to_data_frame(ext, path, target, handler_args)
return dataframe
def _file_to_data_frame(ext, path, target, handler_args):
_ = target # TODO: why is this unused?
logic = PARSERS.get(ext)
the_module = importlib.import_module(logic['module'])
if not isinstance(the_module, ModuleType):
raise BuildException("Missing required module: %s." % logic['module'])
# allow user to specify handler kwargs and override default kwargs
kwargs = logic['kwargs'].copy()
kwargs.update(handler_args)
failover = logic.get('failover', None)
handler = getattr(the_module, logic['attr'], None)
if handler is None:
raise BuildException("Invalid handler: %r" % logic['attr'])
dataframe = None
try_again = False
try:
size = os.path.getsize(path)
with tqdm(total=size, unit='B', unit_scale=True) as progress:
def _callback(count):
progress.update(count)
with FileWithReadProgress(path, _callback) as fd:
dataframe = handler(fd, **kwargs)
except ValueError as error:
if failover:
warning = "Warning: failed fast parse on input %s.\n" % path
warning += "Switching to Python engine."
print(warning)
try_again = True
else:
raise BuildException(str(error))
if try_again:
failover_args = {}
failover_args.update(failover)
failover_args.update(kwargs)
try:
dataframe = handler(path, **failover_args)
except ValueError as error:
raise BuildException(str(error))
# cast object columns to strings
# TODO does pyarrow finally support objects?
for name, col in dataframe.iteritems():
if col.dtype == 'object':
dataframe[name] = col.astype(str)
return dataframe
def build_package(team, username, package, yaml_path, checks_path=None, dry_run=False, env='default'):
"""
Builds a package from a given Yaml file and installs it locally.
Returns the name of the package.
"""
def find(key, value):
"""
find matching nodes recursively;
only descend iterables that aren't strings
"""
if isinstance(value, Iterable) and not isinstance(value, string_types):
for k, v in iteritems(value):
if k == key:
yield v
elif isinstance(v, dict):
for result in find(key, v):
yield result
elif isinstance(v, list):
for item in v:
for result in find(key, item):
yield result
build_data = load_yaml(yaml_path)
    # default to 'checks.yml' when the build file's contents: section
    # references checks but the build file defines no inline checks: section
if (checks_path is None and list(find('checks', build_data['contents'])) and
'checks' not in build_data):
checks_path = 'checks.yml'
checks_contents = load_yaml(checks_path, optional=True)
elif checks_path is not None:
checks_contents = load_yaml(checks_path)
else:
checks_contents = None
build_package_from_contents(team, username, package, os.path.dirname(yaml_path), build_data,
checks_contents=checks_contents, dry_run=dry_run, env=env)
def build_package_from_contents(team, username, package, build_dir, build_data,
checks_contents=None, dry_run=False, env='default'):
contents = build_data.get('contents', {})
if not isinstance(contents, dict):
raise BuildException("'contents' must be a dictionary")
pkgformat = build_data.get('format', PackageFormat.default.value)
if not isinstance(pkgformat, str):
raise BuildException("'format' must be a string")
try:
pkgformat = PackageFormat(pkgformat)
except ValueError:
raise BuildException("Unsupported format: %r" % pkgformat)
# HDF5 no longer supported.
if pkgformat is PackageFormat.HDF5:
raise BuildException("HDF5 format is no longer supported; please use PARQUET instead.")
# inline checks take precedence
checks_contents = {} if checks_contents is None else checks_contents
checks_contents.update(build_data.get('checks', {}))
store = PackageStore()
newpackage = store.create_package(team, username, package, dry_run=dry_run)
_build_node(build_dir, newpackage, '', contents, pkgformat,
checks_contents=checks_contents, dry_run=dry_run, env=env)
if not dry_run:
newpackage.save_contents()
def splitext_no_dot(filename):
"""
Wrap os.path.splitext to return the name and the extension
without the '.' (e.g., csv instead of .csv)
"""
name, ext = os.path.splitext(filename)
ext = ext.lower()
return name, ext.strip('.')
def generate_contents(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
def _ignored_name(name):
return (
name.startswith('.') or
name == PACKAGE_DIR_NAME or
name.endswith('~') or
name == outfilename
)
def _generate_contents(dir_path):
safename_duplicates = defaultdict(list)
for name in os.listdir(dir_path):
if _ignored_name(name):
continue
path = os.path.join(dir_path, name)
if os.path.isdir(path):
nodename = name
ext = None
elif os.path.isfile(path):
nodename, ext = splitext_no_dot(name)
else:
continue
safename = _pythonize_name(nodename)
safename_duplicates[safename].append((name, nodename, ext))
safename_to_name = {}
for safename, duplicates in iteritems(safename_duplicates):
for name, nodename, ext in duplicates:
if len(duplicates) > 1 and ext:
new_safename = _pythonize_name(name) # Name with ext
else:
new_safename = safename
existing_name = safename_to_name.get(new_safename)
if existing_name is not None:
raise BuildException(
"Duplicate node names in directory %r. %r was renamed to %r, which overlaps with %r" % (
dir_path, name, new_safename, existing_name)
)
safename_to_name[new_safename] = name
contents = {}
for safename, name in iteritems(safename_to_name):
path = os.path.join(dir_path, name)
if os.path.isdir(path):
data = _generate_contents(path)
else:
rel_path = os.path.relpath(path, startpath)
data = dict(file=rel_path)
contents[safename] = data
return contents
return dict(
contents=_generate_contents(startpath)
)
def generate_build_file(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
buildfilepath = os.path.join(startpath, outfilename)
if os.path.exists(buildfilepath):
raise BuildException("Build file %s already exists." % buildfilepath)
contents = generate_contents(startpath, outfilename)
with open(buildfilepath, 'w') as outfile:
yaml.dump(contents, outfile, default_flow_style=False)
return buildfilepath
def load_yaml(filename, optional=False):
if optional and (filename is None or not os.path.isfile(filename)):
return None
with open(filename, 'r') as fd:
data = fd.read()
try:
res = yaml.load(data)
except yaml.scanner.ScannerError as error:
mark = error.problem_mark
message = ["Bad yaml syntax in {!r}".format(filename),
" Line {}, column {}:".format(mark.line, mark.column)]
message.extend(error.problem_mark.get_snippet().split(os.linesep))
message.append(" " + error.problem)
raise BuildException('\n'.join(message))
if res is None:
if optional:
return None
raise BuildException("Unable to open YAML file: %s" % filename)
return res
def exec_yaml_python(chkcode, dataframe, nodename, path, target='pandas'):
# TODO False vs Exception...
try:
# setup for eval
qc.nodename = nodename
qc.filename = path
qc.data = dataframe
eval_globals = {
'qc': qc, 'numpy': np, 'df': df, 'pd': pd, 're': re
}
# single vs multi-line checks - YAML hackery
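        # e.g. an illustrative one-liner "len(qc.data) > 0" is eval'd for its
        # boolean result, while a multi-line block is exec'd and can only
        # signal failure by raising qc.CheckFunctionsReturn (handled below)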
if '\n' in str(chkcode):
# note: python2 doesn't support named args for exec()
# https://docs.python.org/2/reference/simple_stmts.html#exec
exec(str(chkcode), eval_globals, {}) # pylint:disable=W0122
res = True
else:
# str() to handle True/False
res = eval(str(chkcode), eval_globals, {}) # pylint:disable=W0123
except qc.CheckFunctionsReturn as ex:
res = ex.result
except Exception as ex:
raise BuildException("Data check raised exception: %s on %s @ %s" % (ex, path, target))
return res
| 1 | 16,211 | So what exactly is this fixing? The unit test is passing with or without this change. | quiltdata-quilt | py |
@@ -601,8 +601,18 @@ bool FileBasedWal::linkCurrentWAL(const char* newPath) {
LOG(INFO) << idStr_ << "Create link failed, there is no wal files!";
return false;
}
+ if (!fs::FileUtils::makeDir(newPath)) {
+        LOG(INFO) << idStr_ << "Failed to create parent dir for link file: " << newPath;
+ return false;
+ }
+
auto it = walFiles_.rbegin();
- if (link(it->second->path(), newPath) != 0) {
+
+ // Using the original wal file name.
+ auto targetFile = folly::stringPrintf("%s/%s", newPath,
+ it->second->path() + std::string(it->second->path()).rfind('/') + 1);
+
+ if (link(it->second->path(), targetFile.data()) != 0) {
LOG(INFO) << idStr_ << "Create link failed for " << it->second->path()
<< " on " << newPath << ", error:" << strerror(errno);
return false; | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include <utime.h>
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/wal/FileBasedWalIterator.h"
#include "fs/FileUtils.h"
#include "time/WallClock.h"
namespace nebula {
namespace wal {
using nebula::fs::FileUtils;
/**********************************************
*
* Implementation of FileBasedWal
*
*********************************************/
// static
std::shared_ptr<FileBasedWal> FileBasedWal::getWal(
const folly::StringPiece dir,
const std::string& idStr,
FileBasedWalPolicy policy,
PreProcessor preProcessor) {
return std::shared_ptr<FileBasedWal>(
new FileBasedWal(dir, idStr, std::move(policy), std::move(preProcessor)));
}
FileBasedWal::FileBasedWal(const folly::StringPiece dir,
const std::string& idStr,
FileBasedWalPolicy policy,
PreProcessor preProcessor)
: dir_(dir.toString())
, idStr_(idStr)
, policy_(std::move(policy))
, maxFileSize_(policy_.fileSize)
, maxBufferSize_(policy_.bufferSize)
, preProcessor_(std::move(preProcessor)) {
// Make sure WAL directory exist
if (FileUtils::fileType(dir_.c_str()) == fs::FileType::NOTEXIST) {
FileUtils::makeDir(dir_);
}
scanAllWalFiles();
if (!walFiles_.empty()) {
firstLogId_ = walFiles_.begin()->second->firstId();
auto& info = walFiles_.rbegin()->second;
lastLogId_ = info->lastId();
lastLogTerm_ = info->lastTerm();
LOG(INFO) << idStr_ << "lastLogId in wal is " << lastLogId_
<< ", lastLogTerm is " << lastLogTerm_;
currFd_ = open(info->path(), O_WRONLY | O_APPEND);
currInfo_ = info;
CHECK_GE(currFd_, 0);
}
}
FileBasedWal::~FileBasedWal() {
// FileBasedWal inherits from std::enable_shared_from_this, so at this
    // moment, there should be no other thread holding this WAL object
// Close the last file
closeCurrFile();
LOG(INFO) << idStr_ << "~FileBasedWal, dir = " << dir_;
}
void FileBasedWal::scanAllWalFiles() {
std::vector<std::string> files = FileUtils::listAllFilesInDir(dir_.c_str(), false, "*.wal");
for (auto& fn : files) {
// Split the file name
// The file name convention is "<first id in the file>.wal"
std::vector<std::string> parts;
folly::split('.', fn, parts);
if (parts.size() != 2) {
LOG(ERROR) << "Ignore unknown file \"" << fn << "\"";
continue;
}
int64_t startIdFromName;
try {
startIdFromName = folly::to<int64_t>(parts[0]);
} catch (const std::exception& ex) {
LOG(ERROR) << "Ignore bad file name \"" << fn << "\"";
continue;
}
WalFileInfoPtr info = std::make_shared<WalFileInfo>(
FileUtils::joinPath(dir_, fn),
startIdFromName);
walFiles_.insert(std::make_pair(startIdFromName, info));
// Get the size of the file and the mtime
struct stat st;
if (lstat(info->path(), &st) < 0) {
LOG(ERROR) << "Failed to get the size and mtime for \""
<< fn << "\", ignore it";
continue;
}
info->setSize(st.st_size);
info->setMTime(st.st_mtime);
if (info->size() == 0) {
// Found an empty WAL file
LOG(WARNING) << "Found empty wal file \"" << fn << "\"";
info->setLastId(0);
info->setLastTerm(0);
continue;
}
// Open the file
int32_t fd = open(info->path(), O_RDONLY);
if (fd < 0) {
LOG(ERROR) << "Failed to open the file \"" << fn << "\" ("
<< errno << "): " << strerror(errno);
continue;
}
// Read the first log id
LogID firstLogId = -1;
if (read(fd, &firstLogId, sizeof(LogID)) != sizeof(LogID)) {
LOG(ERROR) << "Failed to read the first log id from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
if (firstLogId != startIdFromName) {
LOG(ERROR) << "The first log id " << firstLogId
<< " does not match the file name \""
<< fn << "\", ignore it!";
close(fd);
continue;
}
// Read the last log length
if (lseek(fd, -sizeof(int32_t), SEEK_END) < 0) {
LOG(ERROR) << "Failed to seek the last log length from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
int32_t succMsgLen;
if (read(fd, &succMsgLen, sizeof(int32_t)) != sizeof(int32_t)) {
LOG(ERROR) << "Failed to read the last log length from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
// Verify the last log length
if (lseek(fd,
-(sizeof(int32_t) * 2 + succMsgLen + sizeof(ClusterID)),
SEEK_END) < 0) {
LOG(ERROR) << "Failed to seek the last log length from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
int32_t precMsgLen;
if (read(fd, &precMsgLen, sizeof(int32_t)) != sizeof(int32_t)) {
LOG(ERROR) << "Failed to read the last log length from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
if (precMsgLen != succMsgLen) {
LOG(ERROR) << "It seems the wal file \"" << fn
<< "\" is corrupted. Ignore it";
// TODO We might want to fix it as much as possible
close(fd);
continue;
}
// Read the last log term
if (lseek(fd,
-(sizeof(int32_t) * 2
+ succMsgLen
+ sizeof(ClusterID)
+ sizeof(TermID)),
SEEK_END) < 0) {
LOG(ERROR) << "Failed to seek the last log term from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
TermID term = -1;
if (read(fd, &term, sizeof(TermID)) != sizeof(TermID)) {
LOG(ERROR) << "Failed to read the last log term from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
info->setLastTerm(term);
// Read the last log id
if (lseek(fd,
-(sizeof(int32_t) * 2
+ succMsgLen
+ sizeof(ClusterID)
+ sizeof(TermID)
+ sizeof(LogID)),
SEEK_END) < 0) {
LOG(ERROR) << "Failed to seek the last log id from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
LogID lastLogId = -1;
if (read(fd, &lastLogId, sizeof(LogID)) != sizeof(LogID)) {
LOG(ERROR) << "Failed to read the last log id from \""
<< fn << "\" (" << errno << "): "
<< strerror(errno);
close(fd);
continue;
}
info->setLastId(lastLogId);
        // We now have all the necessary info
close(fd);
}
if (!walFiles_.empty()) {
auto it = walFiles_.rbegin();
        // Try to scan the last wal; if it is invalid or empty, remove it
scanLastWal(it->second, it->second->firstId());
if (it->second->lastId() <= 0) {
unlink(it->second->path());
walFiles_.erase(it->first);
}
}
// Make sure there is no gap in the logs
if (!walFiles_.empty()) {
LogID logIdAfterLastGap = -1;
auto it = walFiles_.begin();
LogID prevLastId = it->second->lastId();
for (++it; it != walFiles_.end(); ++it) {
if (it->second->firstId() > prevLastId + 1) {
// Found a gap
LOG(ERROR) << "Found a log id gap before "
<< it->second->firstId()
<< ", the previous log id is " << prevLastId;
logIdAfterLastGap = it->second->firstId();
}
prevLastId = it->second->lastId();
}
if (logIdAfterLastGap > 0) {
// Found gap, remove all logs before the last gap
it = walFiles_.begin();
while (it->second->firstId() < logIdAfterLastGap) {
LOG(INFO) << "Removing the wal file \""
<< it->second->path() << "\"";
unlink(it->second->path());
it = walFiles_.erase(it);
}
}
}
}
void FileBasedWal::closeCurrFile() {
if (currFd_ < 0) {
// Already closed
CHECK(!currInfo_);
return;
}
CHECK_EQ(fsync(currFd_), 0) << strerror(errno);
// Close the file
CHECK_EQ(close(currFd_), 0) << strerror(errno);
currFd_ = -1;
auto now = time::WallClock::fastNowInSec();
currInfo_->setMTime(now);
// DCHECK_EQ(currInfo_->size(), FileUtils::fileSize(currInfo_->path()))
// << currInfo_->path() << " size does not match";
struct utimbuf timebuf;
timebuf.modtime = currInfo_->mtime();
timebuf.actime = currInfo_->mtime();
VLOG(1) << "Close cur file " << currInfo_->path() << ", mtime: " << currInfo_->mtime();
CHECK_EQ(utime(currInfo_->path(), &timebuf), 0);
currInfo_.reset();
}
void FileBasedWal::prepareNewFile(LogID startLogId) {
CHECK_LT(currFd_, 0)
<< "The current file needs to be closed first";
// Prepare the last entry in walFiles_
WalFileInfoPtr info = std::make_shared<WalFileInfo>(
FileUtils::joinPath(dir_,
folly::stringPrintf("%019ld.wal", startLogId)),
startLogId);
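    // File names are the zero-padded first log id ("%019ld.wal"); e.g. log
    // id 42 becomes "0000000000000000042.wal", so lexicographic file order
    // matches numeric log id order.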
VLOG(1) << idStr_ << "Write new file " << info->path();
walFiles_.emplace(std::make_pair(startLogId, info));
// Create the file for write
currFd_ = open(
info->path(),
O_CREAT | O_EXCL | O_WRONLY | O_APPEND | O_CLOEXEC | O_LARGEFILE,
0644);
if (currFd_ < 0) {
LOG(FATAL) << "Failed to open file \"" << info->path()
<< "\" (errno: " << errno << "): "
<< strerror(errno);
}
currInfo_ = info;
}
void FileBasedWal::rollbackInFile(WalFileInfoPtr info, LogID logId) {
auto path = info->path();
int32_t fd = open(path, O_RDWR);
if (fd < 0) {
LOG(FATAL) << "Failed to open file \"" << path
<< "\" (errno: " << errno << "): "
<< strerror(errno);
}
size_t pos = 0;
LogID id = 0;
TermID term = 0;
while (true) {
// Read the log Id
if (pread(fd, &id, sizeof(LogID), pos) != sizeof(LogID)) {
LOG(ERROR) << "Failed to read the log id (errno "
<< errno << "): " << strerror(errno);
break;
}
// Read the term Id
if (pread(fd, &term, sizeof(TermID), pos + sizeof(LogID)) != sizeof(TermID)) {
LOG(ERROR) << "Failed to read the term id (errno "
<< errno << "): " << strerror(errno);
break;
}
// Read the message length
int32_t len;
if (pread(fd, &len, sizeof(int32_t), pos + sizeof(LogID) + sizeof(TermID))
!= sizeof(int32_t)) {
LOG(ERROR) << "Failed to read the message length (errno "
<< errno << "): " << strerror(errno);
break;
}
// Move to the next log
pos += sizeof(LogID)
+ sizeof(TermID)
+ sizeof(ClusterID)
+ 2 * sizeof(int32_t)
+ len;
if (id == logId) {
break;
}
}
if (id != logId) {
        LOG(FATAL) << idStr_ << "Didn't find log " << logId << " in " << path;
}
lastLogId_ = logId;
lastLogTerm_ = term;
LOG(INFO) << idStr_ << "Rollback to log " << logId;
CHECK_GT(pos, 0) << "This wal should have been deleted";
if (pos < FileUtils::fileSize(path)) {
LOG(INFO) << idStr_ << "Need to truncate from offset " << pos;
if (ftruncate(fd, pos) < 0) {
LOG(FATAL) << "Failed to truncate file \"" << path
<< "\" (errno: " << errno << "): "
<< strerror(errno);
}
info->setSize(pos);
}
info->setLastId(id);
info->setLastTerm(term);
close(fd);
}
void FileBasedWal::scanLastWal(WalFileInfoPtr info, LogID firstId) {
auto* path = info->path();
int32_t fd = open(path, O_RDWR);
if (fd < 0) {
LOG(FATAL) << "Failed to open file \"" << path
<< "\" (errno: " << errno << "): "
<< strerror(errno);
}
LogID curLogId = firstId;
size_t pos = 0;
LogID id = 0;
TermID term = 0;
int32_t head = 0;
int32_t foot = 0;
while (true) {
// Read the log Id
if (pread(fd, &id, sizeof(LogID), pos) != sizeof(LogID)) {
break;
}
if (id != curLogId) {
LOG(ERROR) << "LogId is not consistent" << id << " " << curLogId;
break;
}
// Read the term Id
if (pread(fd, &term, sizeof(TermID), pos + sizeof(LogID)) != sizeof(TermID)) {
break;
}
// Read the message length
if (pread(fd, &head, sizeof(int32_t), pos + sizeof(LogID) + sizeof(TermID))
!= sizeof(int32_t)) {
break;
}
if (pread(fd, &foot, sizeof(int32_t),
pos + sizeof(LogID) + sizeof(TermID) + sizeof(int32_t) + sizeof(ClusterID) + head)
!= sizeof(int32_t)) {
break;
}
if (head != foot) {
LOG(ERROR) << "Message size doen't match: " << head << " != " << foot;
break;
}
info->setLastTerm(term);
info->setLastId(id);
// Move to the next log
pos += sizeof(LogID)
+ sizeof(TermID)
+ sizeof(ClusterID)
+ sizeof(int32_t)
+ head
+ sizeof(int32_t);
++curLogId;
}
LOG(INFO) << idStr_ << "Scan last wal " << path << ", last wal id is " << id;
if (0 < pos && pos < FileUtils::fileSize(path)) {
LOG(WARNING) << "Invalid wal " << path << ", truncate from offset " << pos;
if (ftruncate(fd, pos) < 0) {
LOG(FATAL) << "Failed to truncate file \"" << path
<< "\" (errno: " << errno << "): "
<< strerror(errno);
}
info->setSize(pos);
}
close(fd);
}
BufferPtr FileBasedWal::getLastBuffer(LogID id, size_t expectedToWrite) {
std::unique_lock<std::mutex> g(buffersMutex_);
if (!buffers_.empty()) {
if (buffers_.back()->size() + expectedToWrite <= maxBufferSize_) {
return buffers_.back();
}
// Need to rollover to a new buffer
if (buffers_.size() == policy_.numBuffers) {
// Need to pop the first one
buffers_.pop_front();
}
CHECK_LT(buffers_.size(), policy_.numBuffers);
}
buffers_.emplace_back(std::make_shared<InMemoryLogBuffer>(id));
return buffers_.back();
}
bool FileBasedWal::appendLogInternal(LogID id,
TermID term,
ClusterID cluster,
std::string msg) {
if (stopped_) {
LOG(ERROR) << idStr_ << "WAL has stopped. Do not accept logs any more";
return false;
}
if (lastLogId_ != 0 && firstLogId_ != 0 && id != lastLogId_ + 1) {
LOG(ERROR) << idStr_ << "There is a gap in the log id. The last log id is "
<< lastLogId_
<< ", and the id being appended is " << id;
return false;
}
if (!preProcessor_(id, term, cluster, msg)) {
LOG(ERROR) << idStr_ << "Pre process failed for log " << id;
return false;
}
// Write to the WAL file first
std::string strBuf;
strBuf.reserve(sizeof(LogID)
+ sizeof(TermID)
+ sizeof(ClusterID)
+ msg.size()
+ 2 * sizeof(int32_t));
strBuf.append(reinterpret_cast<char*>(&id), sizeof(LogID));
strBuf.append(reinterpret_cast<char*>(&term), sizeof(TermID));
int32_t len = msg.size();
strBuf.append(reinterpret_cast<char*>(&len), sizeof(int32_t));
strBuf.append(reinterpret_cast<char*>(&cluster), sizeof(ClusterID));
strBuf.append(reinterpret_cast<const char*>(msg.data()), msg.size());
strBuf.append(reinterpret_cast<char*>(&len), sizeof(int32_t));
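    // On-disk record layout, matching the appends above:
    //   LogID | TermID | msgLen(int32) | ClusterID | msg bytes | msgLen(int32)
    // The trailing copy of msgLen is what lets scanLastWal() and
    // scanAllWalFiles() verify a record (head == foot) and read fields
    // backwards from the end of the file.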
// Prepare the WAL file if it's not opened
if (currFd_ < 0) {
prepareNewFile(id);
} else if (currInfo_->size() + strBuf.size() > maxFileSize_) {
// Need to roll over
closeCurrFile();
std::lock_guard<std::mutex> g(walFilesMutex_);
prepareNewFile(id);
}
ssize_t bytesWritten = write(currFd_, strBuf.data(), strBuf.size());
if (bytesWritten != (ssize_t)strBuf.size()) {
LOG(FATAL) << idStr_ << "bytesWritten:" << bytesWritten << ", expected:" << strBuf.size()
<< ", error:" << strerror(errno);
}
currInfo_->setSize(currInfo_->size() + strBuf.size());
currInfo_->setLastId(id);
currInfo_->setLastTerm(term);
lastLogId_ = id;
lastLogTerm_ = term;
if (firstLogId_ == 0) {
firstLogId_ = id;
}
// Append to the in-memory buffer
auto buffer = getLastBuffer(id, strBuf.size());
DCHECK_EQ(id, static_cast<int64_t>(buffer->firstLogId() + buffer->numLogs()));
buffer->push(term, cluster, std::move(msg));
return true;
}
bool FileBasedWal::appendLog(LogID id,
TermID term,
ClusterID cluster,
std::string msg) {
if (!appendLogInternal(id, term, cluster, std::move(msg))) {
LOG(ERROR) << "Failed to append log for logId " << id;
return false;
}
return true;
}
bool FileBasedWal::appendLogs(LogIterator& iter) {
for (; iter.valid(); ++iter) {
if (!appendLogInternal(iter.logId(),
iter.logTerm(),
iter.logSource(),
iter.logMsg().toString())) {
LOG(ERROR) << idStr_ << "Failed to append log for logId "
<< iter.logId();
return false;
}
}
return true;
}
std::unique_ptr<LogIterator> FileBasedWal::iterator(LogID firstLogId,
LogID lastLogId) {
return std::make_unique<FileBasedWalIterator>(shared_from_this(), firstLogId, lastLogId);
}
bool FileBasedWal::linkCurrentWAL(const char* newPath) {
closeCurrFile();
std::lock_guard<std::mutex> g(walFilesMutex_);
if (walFiles_.empty()) {
LOG(INFO) << idStr_ << "Create link failed, there is no wal files!";
return false;
}
auto it = walFiles_.rbegin();
if (link(it->second->path(), newPath) != 0) {
LOG(INFO) << idStr_ << "Create link failed for " << it->second->path()
<< " on " << newPath << ", error:" << strerror(errno);
return false;
}
LOG(INFO) << idStr_ << "Create link success for " << it->second->path()
<< " on " << newPath;
return true;
}
bool FileBasedWal::rollbackToLog(LogID id) {
if (id < firstLogId_ - 1 || id > lastLogId_) {
LOG(ERROR) << idStr_ << "Rollback target id " << id
<< " is not in the range of ["
<< firstLogId_ << ","
<< lastLogId_ << "] of WAL";
return false;
}
folly::RWSpinLock::WriteHolder holder(rollbackLock_);
//-----------------------
// 1. Roll back WAL files
//-----------------------
// First close the current file
closeCurrFile();
{
std::lock_guard<std::mutex> g(walFilesMutex_);
if (!walFiles_.empty()) {
auto it = walFiles_.upper_bound(id);
// We need to remove wal files whose entire log range
// are rolled back
while (it != walFiles_.end()) {
// Need to remove the file
VLOG(1) << "Removing file " << it->second->path();
unlink(it->second->path());
it = walFiles_.erase(it);
}
}
if (walFiles_.empty()) {
// All WAL files are gone
CHECK(id == firstLogId_ - 1 || id == 0);
firstLogId_ = 0;
lastLogId_ = 0;
lastLogTerm_ = 0;
} else {
VLOG(1) << "Roll back to log " << id
<< ", the last WAL file is now \""
<< walFiles_.rbegin()->second->path() << "\"";
rollbackInFile(walFiles_.rbegin()->second, id);
}
}
//------------------------------
// 2. Roll back in-memory buffers
//------------------------------
{
std::unique_lock<std::mutex> g(buffersMutex_);
buffers_.clear();
}
return true;
}
bool FileBasedWal::reset() {
closeCurrFile();
{
std::lock_guard<std::mutex> g(buffersMutex_);
buffers_.clear();
}
{
std::lock_guard<std::mutex> g(walFilesMutex_);
walFiles_.clear();
}
std::vector<std::string> files =
FileUtils::listAllFilesInDir(dir_.c_str(), false, "*.wal");
for (auto& fn : files) {
auto absFn = FileUtils::joinPath(dir_, fn);
LOG(INFO) << "Removing " << absFn;
unlink(absFn.c_str());
}
lastLogId_ = firstLogId_ = 0;
return true;
}
void FileBasedWal::cleanWAL(int32_t ttl) {
std::lock_guard<std::mutex> g(walFilesMutex_);
if (walFiles_.empty()) {
return;
}
auto now = time::WallClock::fastNowInSec();
    // We skip the latest wal file because it is being written to now.
size_t index = 0;
auto it = walFiles_.begin();
auto size = walFiles_.size();
int count = 0;
int walTTL = ttl == 0 ? policy_.ttl : ttl;
while (it != walFiles_.end()) {
if (index++ < size - 1 && (now - it->second->mtime() > walTTL)) {
VLOG(1) << "Clean wals, Remove " << it->second->path() << ", now: " << now
<< ", mtime: " << it->second->mtime();
unlink(it->second->path());
it = walFiles_.erase(it);
count++;
} else {
++it;
}
}
if (count > 0) {
LOG(INFO) << idStr_ << "Clean wals number " << count;
}
firstLogId_ = walFiles_.begin()->second->firstId();
}
size_t FileBasedWal::accessAllWalInfo(std::function<bool(WalFileInfoPtr info)> fn) const {
std::lock_guard<std::mutex> g(walFilesMutex_);
size_t count = 0;
for (auto it = walFiles_.rbegin(); it != walFiles_.rend(); ++it) {
++count;
if (!fn(it->second)) {
break;
}
}
return count;
}
size_t FileBasedWal::accessAllBuffers(std::function<bool(BufferPtr buffer)> fn) const {
std::lock_guard<std::mutex> g(buffersMutex_);
size_t count = 0;
for (auto it = buffers_.rbegin(); it != buffers_.rend(); ++it) {
++count;
if (!fn(*it)) {
break;
}
}
return count;
}
} // namespace wal
} // namespace nebula
| 1 | 24,016 | auto targetFile = fs::FileUtils::joinPath(newPath, folly::stringPrintf("%019ld.wal", it->first)); | vesoft-inc-nebula | cpp |
@@ -42,6 +42,16 @@ public class SnapshotUtil {
return ancestorIds(table.currentSnapshot(), table::snapshot);
}
+ /**
+ * @return List of snapshot ids in the range - (fromSnapshotId, toSnapshotId]
+ * This method assumes that fromSnapshotId is an ancestor of toSnapshotId
+ */
+ public static List<Long> snapshotIdsBetween(Table table, long fromSnapshotId, long toSnapshotId) {
+ List<Long> snapshotIds = Lists.newArrayList(ancestorIds(table.snapshot(toSnapshotId),
+ snapshotId -> snapshotId != fromSnapshotId ? table.snapshot(snapshotId) : null));
+ return snapshotIds;
+ }
+
public static List<Long> ancestorIds(Snapshot snapshot, Function<Long, Snapshot> lookup) {
List<Long> ancestorIds = Lists.newArrayList();
Snapshot current = snapshot; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.util;
import com.google.common.collect.Lists;
import java.util.List;
import java.util.function.Function;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.Table;
public class SnapshotUtil {
private SnapshotUtil() {
}
/**
* Return the snapshot IDs for the ancestors of the current table state.
* <p>
* Ancestor IDs are ordered by commit time, descending. The first ID is the current snapshot, followed by its parent,
* and so on.
*
* @param table a {@link Table}
* @return a set of snapshot IDs of the known ancestor snapshots, including the current ID
*/
public static List<Long> currentAncestors(Table table) {
return ancestorIds(table.currentSnapshot(), table::snapshot);
}
public static List<Long> ancestorIds(Snapshot snapshot, Function<Long, Snapshot> lookup) {
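        // Walks parent ids via the supplied lookup; e.g. for a chain
        // s3 -> s2 -> s1 (s3 newest) this returns [s3, s2, s1]. A lookup
        // returning null (e.g. an expired parent) simply ends the walk.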
List<Long> ancestorIds = Lists.newArrayList();
Snapshot current = snapshot;
while (current != null) {
ancestorIds.add(current.snapshotId());
if (current.parentId() != null) {
current = lookup.apply(current.parentId());
} else {
current = null;
}
}
return ancestorIds;
}
}
| 1 | 14,714 | I don't see any place that checks whether the from snapshot is an ancestor of the to snapshot. That seems like a requirement for this to work correctly to me. | apache-iceberg | java |
@@ -367,11 +367,10 @@ class Connection implements TranslatorAwareInterface, LoggerAwareInterface
explode(':', $functionConfig['updateFields'])
);
}
- if (isset($functionConfig['helpText'])) {
- $response['helpText'] = $this->getHelpText(
- $functionConfig['helpText']
- );
- }
+ $response['helpText']
+ = $this->getHelpText($functionConfig['helpText'] ?? '');
+ $response['updateHelpText']
+ = $this->getHelpText($functionConfig['updateHelpText'] ?? '');
if (isset($functionConfig['consortium'])) {
$response['consortium'] = $functionConfig['consortium'];
} | 1 | <?php
/**
* Catalog Connection Class
*
* This wrapper works with a driver class to pass information from the ILS to
* VuFind.
*
* PHP version 7
*
* Copyright (C) Villanova University 2007.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package ILS_Drivers
* @author Andrew S. Nagy <vufind-tech@lists.sourceforge.net>
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
namespace VuFind\ILS;
use Laminas\Log\LoggerAwareInterface;
use VuFind\Exception\BadConfig;
use VuFind\Exception\ILS as ILSException;
use VuFind\I18n\Translator\TranslatorAwareInterface;
use VuFind\ILS\Driver\DriverInterface;
/**
* Catalog Connection Class
*
* This wrapper works with a driver class to pass information from the ILS to
* VuFind.
*
* @category VuFind
* @package ILS_Drivers
* @author Andrew S. Nagy <vufind-tech@lists.sourceforge.net>
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
class Connection implements TranslatorAwareInterface, LoggerAwareInterface
{
use \VuFind\I18n\Translator\TranslatorAwareTrait;
use \VuFind\Log\LoggerAwareTrait;
/**
* Has the driver been initialized yet?
*
* @var bool
*/
protected $driverInitialized = false;
/**
* The object of the appropriate driver.
*
* @var object
*/
protected $driver = null;
/**
* ILS configuration
*
* @var \Laminas\Config\Config
*/
protected $config;
/**
* Holds mode
*
* @var string
*/
protected $holdsMode = 'disabled';
/**
* Title-level holds mode
*
* @var string
*/
protected $titleHoldsMode = 'disabled';
/**
* Driver plugin manager
*
* @var \VuFind\ILS\Driver\PluginManager
*/
protected $driverManager;
/**
* Configuration loader
*
* @var \VuFind\Config\PluginManager
*/
protected $configReader;
/**
* Is the current ILS driver failing?
*
* @var bool
*/
protected $failing = false;
/**
* Request object
*
* @var \Laminas\Http\Request
*/
protected $request;
/**
* Constructor
*
* @param \Laminas\Config\Config $config Configuration
* representing the [Catalog] section of config.ini
* @param \VuFind\ILS\Driver\PluginManager $driverManager Driver plugin manager
* @param \VuFind\Config\PluginManager $configReader Configuration loader
* @param \Laminas\Http\Request $request Request object
*/
public function __construct(
\Laminas\Config\Config $config,
\VuFind\ILS\Driver\PluginManager $driverManager,
\VuFind\Config\PluginManager $configReader,
\Laminas\Http\Request $request = null
) {
if (!isset($config->driver)) {
throw new \Exception('ILS driver setting missing.');
}
if (!$driverManager->has($config->driver)) {
throw new \Exception('ILS driver missing: ' . $config->driver);
}
$this->config = $config;
$this->configReader = $configReader;
$this->driverManager = $driverManager;
$this->request = $request;
}
/**
* Set the hold configuration for the connection.
*
* @param \VuFind\ILS\HoldSettings $settings Hold settings
*
* @return Connection
*/
public function setHoldConfig($settings)
{
$this->holdsMode = $settings->getHoldsMode();
$this->titleHoldsMode = $settings->getTitleHoldsMode();
return $this;
}
/**
* Get class name of the driver object.
*
* @return string
*/
public function getDriverClass()
{
return get_class($this->getDriver(false));
}
/**
* Initialize the ILS driver.
*
* @return void
*/
protected function initializeDriver()
{
try {
$this->driver->setConfig($this->getDriverConfig());
} catch (\Exception $e) {
// Any errors thrown during configuration should be cast to BadConfig
// so we can handle them differently from other runtime problems.
throw $e instanceof BadConfig
? $e
: new BadConfig('Failure during configuration.', 0, $e);
}
$this->driver->init();
$this->driverInitialized = true;
}
/**
* Are we configured to fail over to the NoILS driver on error?
*
* @return bool
*/
protected function hasNoILSFailover()
{
// If we're configured to fail over to the NoILS driver, do so now:
return isset($this->config->loadNoILSOnFailure)
&& $this->config->loadNoILSOnFailure;
}
/**
* If configured, fail over to the NoILS driver and return true; otherwise,
* return false.
*
* @param \Exception $e The exception that triggered the failover.
*
* @return bool
*/
protected function failOverToNoILS(\Exception $e = null)
{
// If the exception is caused by a configuration error, the administrator
// needs to fix it, but failing over to NoILS will mask the error and cause
// confusion. We shouldn't do that!
if ($e instanceof BadConfig) {
return false;
}
// If we got this far, we want to proceed with failover...
$this->failing = true;
// Only fail over if we're configured to allow it and we haven't already
// done so!
if ($this->hasNoILSFailover()) {
$noILS = $this->driverManager->get('NoILS');
if (get_class($noILS) != $this->getDriverClass()) {
$this->setDriver($noILS);
$this->initializeDriver();
return true;
}
}
return false;
}
/**
* Get access to the driver object.
*
* @param bool $init Should we initialize the driver (if necessary), or load it
* "as-is"?
*
* @throws \Exception
* @return object
*/
public function getDriver($init = true)
{
if (null === $this->driver) {
$this->setDriver($this->driverManager->get($this->config->driver));
}
if (!$this->driverInitialized && $init) {
try {
$this->initializeDriver();
} catch (\Exception $e) {
if (!$this->failOverToNoILS($e)) {
throw $e;
}
}
}
return $this->driver;
}
/**
* Set a driver object.
*
* @param DriverInterface $driver Driver to set.
* @param bool $initialized Is this driver already initialized?
*
* @return void
*/
public function setDriver(DriverInterface $driver, $initialized = false)
{
$this->driverInitialized = $initialized;
$this->driver = $driver;
}
/**
* Get configuration for the ILS driver. We will load an .ini file named
* after the driver class if it exists; otherwise we will return an empty
* array.
*
* @return array
*/
public function getDriverConfig()
{
// Determine config file name based on class name:
$parts = explode('\\', $this->getDriverClass());
$config = $this->configReader->get(end($parts));
return is_object($config) ? $config->toArray() : [];
}
/**
* Check Function
*
* This is responsible for checking the driver configuration to determine
* if the system supports a particular function.
*
* @param string $function The name of the function to check.
* @param array $params (optional) An array of function-specific parameters
*
* @return mixed On success, an associative array with specific function keys
* and values; on failure, false.
*/
public function checkFunction($function, $params = null)
{
try {
// Extract the configuration from the driver if available:
$functionConfig = $this->checkCapability(
'getConfig',
[$function, $params],
true
) ? $this->getDriver()->getConfig($function, $params) : false;
// See if we have a corresponding check method to analyze the response:
$checkMethod = "checkMethod" . $function;
if (!method_exists($this, $checkMethod)) {
return false;
}
// Send back the settings:
return $this->$checkMethod($functionConfig, $params);
} catch (ILSException $e) {
$this->logError(
"checkFunction($function) with params: " . print_r($params, true)
. ' failed: ' . $e->getMessage()
);
return false;
}
}
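    // Illustrative (assumed) caller-side usage:
    //   $holds = $catalog->checkFunction('Holds', ['id' => $id, 'patron' => $patron]);
    //   if ($holds && 'placeHold' === ($holds['function'] ?? null)) {
    //       // render a hold form driven by $holds['HMACKeys'] etc.
    //   }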
/**
* Check Holds
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports Holds.
*
* @param array $functionConfig The Hold configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for placing holds via a form or a URL; on failure, false.
*/
protected function checkMethodHolds($functionConfig, $params)
{
$response = false;
// We pass an array containing $params to checkCapability since $params
// should contain 'id' and 'patron' keys; this isn't exactly the same as
// the full parameter expected by placeHold() but should contain the
// necessary details for determining eligibility.
if ($this->getHoldsMode() != "none"
&& $this->checkCapability('placeHold', [$params ?: []])
&& isset($functionConfig['HMACKeys'])
) {
$response = ['function' => "placeHold"];
$response['HMACKeys'] = explode(":", $functionConfig['HMACKeys']);
if (isset($functionConfig['defaultRequiredDate'])) {
$response['defaultRequiredDate']
= $functionConfig['defaultRequiredDate'];
}
if (isset($functionConfig['extraHoldFields'])) {
$response['extraHoldFields'] = $functionConfig['extraHoldFields'];
}
if (!empty($functionConfig['updateFields'])) {
$response['updateFields'] = array_map(
'trim',
explode(':', $functionConfig['updateFields'])
);
}
if (isset($functionConfig['helpText'])) {
$response['helpText'] = $this->getHelpText(
$functionConfig['helpText']
);
}
if (isset($functionConfig['consortium'])) {
$response['consortium'] = $functionConfig['consortium'];
}
$response['pickUpLocationCheckLimit']
= intval($functionConfig['pickUpLocationCheckLimit'] ?? 0);
} else {
$id = $params['id'] ?? null;
if ($this->checkCapability('getHoldLink', [$id, []])) {
$response = ['function' => "getHoldLink"];
}
}
return $response;
}
/**
* Check Cancel Holds
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports Cancelling Holds.
*
* @param array $functionConfig The Cancel Hold configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for cancelling holds via a form or a URL;
* on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodcancelHolds($functionConfig, $params)
{
$response = false;
// We can't pass exactly accurate parameters to checkCapability in this
// context, so we'll just pass along $params as the best available
// approximation.
if (isset($this->config->cancel_holds_enabled)
&& $this->config->cancel_holds_enabled == true
&& $this->checkCapability('cancelHolds', [$params ?: []])
) {
$response = ['function' => "cancelHolds"];
} elseif (isset($this->config->cancel_holds_enabled)
&& $this->config->cancel_holds_enabled == true
&& $this->checkCapability('getCancelHoldLink', [$params ?: []])
) {
$response = ['function' => "getCancelHoldLink"];
}
return $response;
}
/**
* Check Renewals
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports Renewing Items.
*
* @param array $functionConfig The Renewal configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for renewing items via a form or a URL; on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodRenewals($functionConfig, $params)
{
$response = false;
// We can't pass exactly accurate parameters to checkCapability in this
// context, so we'll just pass along $params as the best available
// approximation.
if (isset($this->config->renewals_enabled)
&& $this->config->renewals_enabled == true
&& $this->checkCapability('renewMyItems', [$params ?: []])
) {
$response = ['function' => "renewMyItems"];
} elseif (isset($this->config->renewals_enabled)
&& $this->config->renewals_enabled == true
&& $this->checkCapability('renewMyItemsLink', [$params ?: []])
) {
$response = ['function' => "renewMyItemsLink"];
}
return $response;
}
/**
* Check Storage Retrieval Request
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports storage
* retrieval requests.
*
* @param array $functionConfig The storage retrieval request configuration
* values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for placing requests via a form; on failure, false.
*/
protected function checkMethodStorageRetrievalRequests($functionConfig, $params)
{
$response = false;
// $params doesn't include all of the keys used by
        // placeStorageRetrievalRequest, but it is the best we can do in this context.
$check = $this->checkCapability(
'placeStorageRetrievalRequest',
[$params ?: []]
);
if ($check && isset($functionConfig['HMACKeys'])) {
$response = ['function' => 'placeStorageRetrievalRequest'];
$response['HMACKeys'] = explode(':', $functionConfig['HMACKeys']);
if (isset($functionConfig['extraFields'])) {
$response['extraFields'] = $functionConfig['extraFields'];
}
if (isset($functionConfig['helpText'])) {
$response['helpText'] = $this->getHelpText(
$functionConfig['helpText']
);
}
}
return $response;
}
/**
* Check Cancel Storage Retrieval Requests
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports Cancelling
* Storage Retrieval Requests.
*
* @param array $functionConfig The Cancel function configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for cancelling requests via a form or a URL;
* on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodcancelStorageRetrievalRequests(
$functionConfig,
$params
) {
$response = false;
if (isset($this->config->cancel_storage_retrieval_requests_enabled)
&& $this->config->cancel_storage_retrieval_requests_enabled
) {
$check = $this->checkCapability(
'cancelStorageRetrievalRequests',
[$params ?: []]
);
if ($check) {
$response = ['function' => 'cancelStorageRetrievalRequests'];
} else {
$cancelParams = [
$params ?: [],
$params['patron'] ?? null
];
$check2 = $this->checkCapability(
'getCancelStorageRetrievalRequestLink',
$cancelParams
);
if ($check2) {
$response = [
'function' => 'getCancelStorageRetrievalRequestLink'
];
}
}
}
return $response;
}
/**
* Check ILL Request
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports storage
* retrieval requests.
*
* @param array $functionConfig The ILL request configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for placing requests via a form; on failure, false.
*/
protected function checkMethodILLRequests($functionConfig, $params)
{
$response = false;
// $params doesn't include all of the keys used by
        // placeILLRequest, but it is the best we can do in this context.
if ($this->checkCapability('placeILLRequest', [$params ?: []])
&& isset($functionConfig['HMACKeys'])
) {
$response = ['function' => 'placeILLRequest'];
if (isset($functionConfig['defaultRequiredDate'])) {
$response['defaultRequiredDate']
= $functionConfig['defaultRequiredDate'];
}
$response['HMACKeys'] = explode(':', $functionConfig['HMACKeys']);
if (isset($functionConfig['extraFields'])) {
$response['extraFields'] = $functionConfig['extraFields'];
}
if (isset($functionConfig['helpText'])) {
$response['helpText'] = $this->getHelpText(
$functionConfig['helpText']
);
}
}
return $response;
}
/**
* Check Cancel ILL Requests
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports Cancelling
* ILL Requests.
*
* @param array $functionConfig The Cancel function configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values either for cancelling requests via a form or a URL;
* on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodcancelILLRequests($functionConfig, $params)
{
$response = false;
if (isset($this->config->cancel_ill_requests_enabled)
&& $this->config->cancel_ill_requests_enabled
) {
$check = $this->checkCapability(
'cancelILLRequests',
[$params ?: []]
);
if ($check) {
$response = ['function' => 'cancelILLRequests'];
} else {
$cancelParams = [
$params ?: [],
$params['patron'] ?? null
];
$check2 = $this->checkCapability(
'getCancelILLRequestLink',
$cancelParams
);
if ($check2) {
$response = [
'function' => 'getCancelILLRequestLink'
];
}
}
}
return $response;
}
/**
* Check Password Change
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports changing
* password.
*
* @param array $functionConfig The password change configuration values
* @param array $params Patron data
*
* @return mixed On success, an associative array with specific function keys
* and values either for cancelling requests via a form or a URL;
* on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodchangePassword($functionConfig, $params)
{
if ($this->checkCapability('changePassword', [$params ?: []])) {
return ['function' => 'changePassword'];
}
return false;
}
/**
* Check Current Loans
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports current
* loans.
*
* @param array $functionConfig Function configuration
* @param array $params Patron data
*
* @return mixed On success, an associative array with specific function keys
* and values; on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodgetMyTransactions($functionConfig, $params)
{
if ($this->checkCapability('getMyTransactions', [$params ?: []])) {
return $functionConfig;
}
return false;
}
/**
* Check Historic Loans
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports historic
* loans.
*
* @param array $functionConfig Function configuration
* @param array $params Patron data
*
* @return mixed On success, an associative array with specific function keys
* and values; on failure, false.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function checkMethodgetMyTransactionHistory($functionConfig, $params)
{
if ($this->checkCapability('getMyTransactionHistory', [$params ?: []])) {
return $functionConfig;
}
return false;
}
/**
* Check Patron login
*
* A support method for checkFunction(). This is responsible for checking
* the driver configuration to determine if the system supports patron login.
* It is currently assumed that all drivers do.
*
* @param array $functionConfig The patronLogin configuration values
* @param array $params An array of function-specific params (or null)
*
* @return mixed On success, an associative array with specific function keys
* and values for login; on failure, false.
*/
protected function checkMethodpatronLogin($functionConfig, $params)
{
return $functionConfig;
}
/**
* Get proper help text from the function config
*
* @param string|array $helpText Help text(s)
*
* @return string Language-specific help text
*/
protected function getHelpText($helpText)
{
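        // e.g. ['en' => 'Enter a date...', 'fi' => 'Syötä päivämäärä...']
        // yields the entry for the current translator locale ('' when the
        // locale has no entry); a plain string is returned unchanged.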
if (is_array($helpText)) {
$lang = $this->getTranslatorLocale();
return $helpText[$lang] ?? '';
}
return $helpText;
}
/**
* Check Request is Valid
*
* This is responsible for checking if a request is valid from hold.php
*
* @param string $id A Bibliographic ID
* @param array $data Collected Holds Data
* @param array $patron Patron related data
*
* @return mixed The result of the checkRequestIsValid function if it
* exists, true if it does not
*/
public function checkRequestIsValid($id, $data, $patron)
{
try {
$params = [$id, $data, $patron];
if ($this->checkCapability('checkRequestIsValid', $params)) {
return $this->getDriver()->checkRequestIsValid($id, $data, $patron);
}
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
// If the driver has no checkRequestIsValid method, we will assume that
// all requests are valid - failure can be handled later after the user
// attempts to place an illegal hold
return true;
}
/**
* Check Storage Retrieval Request is Valid
*
* This is responsible for checking if a storage retrieval request is valid
*
* @param string $id A Bibliographic ID
* @param array $data Collected Holds Data
* @param array $patron Patron related data
*
* @return mixed The result of the checkStorageRetrievalRequestIsValid
* function if it exists, false if it does not
*/
public function checkStorageRetrievalRequestIsValid($id, $data, $patron)
{
try {
$check = $this->checkCapability(
'checkStorageRetrievalRequestIsValid',
[$id, $data, $patron]
);
if ($check) {
return $this->getDriver()->checkStorageRetrievalRequestIsValid(
$id,
$data,
$patron
);
}
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
// If the driver has no checkStorageRetrievalRequestIsValid method, we
// will assume that the request is not valid
return false;
}
/**
* Check ILL Request is Valid
*
* This is responsible for checking if an ILL request is valid
*
* @param string $id A Bibliographic ID
* @param array $data Collected Holds Data
* @param array $patron Patron related data
*
* @return mixed The result of the checkILLRequestIsValid
* function if it exists, false if it does not
*/
public function checkILLRequestIsValid($id, $data, $patron)
{
try {
$params = [$id, $data, $patron];
if ($this->checkCapability('checkILLRequestIsValid', $params)) {
return $this->getDriver()->checkILLRequestIsValid(
$id,
$data,
$patron
);
}
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
// If the driver has no checkILLRequestIsValid method, we
// will assume that the request is not valid
return false;
}
/**
* Get Holds Mode
*
* This is responsible for returning the holds mode
*
* @return string The Holds mode
*/
public function getHoldsMode()
{
return $this->holdsMode;
}
/**
* Get Offline Mode
*
* This is responsible for returning the offline mode
*
* @param bool $healthCheck Perform a health check in addition to consulting
* the ILS status?
*
* @return string|bool "ils-offline" for systems where the main ILS is offline,
* "ils-none" for systems which do not use an ILS, false for online systems.
*/
public function getOfflineMode($healthCheck = false)
{
// If we have NoILS failover configured, force driver initialization so
// we can know we are checking the offline mode against the correct driver.
if ($this->hasNoILSFailover()) {
$this->getDriver();
}
// If we need to perform a health check, try to do a random item lookup
// before proceeding.
if ($healthCheck) {
$this->getStatus('1');
}
// If we're encountering failures, let's go into ils-offline mode if
// the ILS driver does not natively support getOfflineMode().
$default = $this->failing ? 'ils-offline' : false;
// Graceful degradation -- return false if no method supported.
return $this->checkCapability('getOfflineMode')
? $this->getDriver()->getOfflineMode() : $default;
}
/**
* Get Title Holds Mode
*
* This is responsible for returning the Title holds mode
*
* @return string The Title Holds mode
*/
public function getTitleHoldsMode()
{
return $this->titleHoldsMode;
}
/**
* Has Holdings
*
* Obtain information on whether or not the item has holdings
*
* @param string $id A bibliographic id
*
* @return bool true on success, false on failure
*/
public function hasHoldings($id)
{
// Graceful degradation -- return true if no method supported.
try {
return $this->checkCapability('hasHoldings', [$id])
? $this->getDriver()->hasHoldings($id) : true;
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
}
/**
* Get Hidden Login Mode
*
* This is responsible for indicating whether login should be hidden.
*
* @return bool true if the login should be hidden, false if not
*/
public function loginIsHidden()
{
// Graceful degradation -- return false if no method supported.
try {
return $this->checkCapability('loginIsHidden')
? $this->getDriver()->loginIsHidden() : false;
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
}
/**
* Check driver capability -- return true if the driver supports the specified
* method; false otherwise.
*
* @param string $method Method to check
* @param array $params Array of passed parameters (optional)
* @param bool $throw Whether to throw exceptions instead of returning false
*
* @return bool
* @throws ILSException
*/
public function checkCapability($method, $params = [], $throw = false)
{
try {
// If we have NoILS failover disabled, we can check capabilities of
// the driver class without wasting time initializing it; if NoILS
// failover is enabled, we have to initialize the driver object now
// to be sure we are checking capabilities on the appropriate class.
$driverToCheck = $this->getDriver($this->hasNoILSFailover());
// First check that the function is callable:
if (is_callable([$driverToCheck, $method])) {
// At least drivers implementing the __call() magic method must also
// implement supportsMethod() to verify that the method is actually
// usable:
if (method_exists($driverToCheck, 'supportsMethod')) {
return $this->getDriver()->supportsMethod($method, $params);
}
return true;
}
} catch (ILSException $e) {
$this->logError(
"checkCapability($method) with params: " . print_r($params, true)
. ' failed: ' . $e->getMessage()
);
if ($throw) {
throw $e;
}
}
// If we got this far, the feature is unsupported:
return false;
}
/**
* Get Names of Textual Holdings Fields
*
* Obtain information on which textual holdings fields should be displayed
*
* @return string[]
*/
public function getHoldingsTextFieldNames()
{
return isset($this->config->holdings_text_fields)
? $this->config->holdings_text_fields->toArray()
: ['holdings_notes', 'summary', 'supplements', 'indexes'];
}
/**
* Get the password policy from the driver
*
* @param array $patron Patron data
*
* @return bool|array Password policy array or false if unsupported
*/
public function getPasswordPolicy($patron)
{
return $this->checkCapability(
'getConfig',
['changePassword', compact('patron')]
) ? $this->getDriver()->getConfig('changePassword', compact('patron'))
: false;
}
/**
* Get Patron Transactions
*
* This is responsible for retrieving all transactions (i.e. checked out items)
* by a specific patron.
*
* @param array $patron The patron array from patronLogin
* @param array $params Parameters
*
* @return mixed Array of the patron's transactions
*/
public function getMyTransactions($patron, $params = [])
{
$result = $this->__call('getMyTransactions', [$patron, $params]);
// Support also older driver return value:
if (!isset($result['count'])) {
$result = [
'count' => count($result),
'records' => $result
];
}
return $result;
}
/**
* Get holdings
*
* Retrieve holdings from ILS driver class and normalize result array if needed.
*
* @param string $id The record id to retrieve the holdings for
* @param array $patron Patron data
* @param array $options Additional options
*
* @return array Array with holding data
*/
public function getHolding($id, $patron = null, $options = [])
{
// Get pagination options for holdings tab:
$params = compact('id', 'patron');
$config = $this->checkCapability('getConfig', ['Holdings', $params])
? $this->getDriver()->getConfig('Holdings', $params) : [];
if (empty($config['itemLimit'])) {
// Use itemLimit in Holds as fallback for backward compatibility:
$config
= $this->checkCapability('getConfig', ['Holds', $params])
? $this->getDriver()->getConfig('Holds', $params) : [];
}
$itemLimit = !empty($config['itemLimit']) ? $config['itemLimit'] : null;
$page = $this->request ? $this->request->getQuery('page', 1) : 1;
$offset = ($itemLimit && is_numeric($itemLimit))
? ($page * $itemLimit) - $itemLimit
: null;
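// Worked example with assumed values: itemLimit = 20, page = 3 gives
// offset = (3 * 20) - 20 = 40, i.e. the holdings list starts at item 41.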
$defaultOptions = compact('page', 'itemLimit', 'offset');
$finalOptions = $options + $defaultOptions;
// Get the holdings from the ILS
$holdings = $this->__call('getHolding', [$id, $patron, $finalOptions]);
// Return all the necessary details:
if (!isset($holdings['holdings'])) {
$holdings = [
'total' => count($holdings),
'holdings' => $holdings,
'electronic_holdings' => [],
];
} else {
if (!isset($holdings['total'])) {
$holdings['total'] = count($holdings['holdings']);
}
if (!isset($holdings['electronic_holdings'])) {
$holdings['electronic_holdings'] = [];
}
}
$holdings['page'] = $finalOptions['page'];
$holdings['itemLimit'] = $finalOptions['itemLimit'];
return $holdings;
}
/**
* Default method -- pass along calls to the driver if available; return
* false otherwise. This allows custom functions to be implemented in
* the driver without constant modification to the connection class.
*
* @param string $methodName The name of the called method.
* @param array $params Array of passed parameters.
*
* @throws ILSException
* @return mixed Varies by method (false if undefined method)
*/
public function __call($methodName, $params)
{
try {
if ($this->checkCapability($methodName, $params)) {
return call_user_func_array(
[$this->getDriver(), $methodName],
$params
);
}
} catch (\Exception $e) {
if ($this->failOverToNoILS($e)) {
return call_user_func_array([$this, __METHOD__], func_get_args());
}
throw $e;
}
throw new ILSException(
'Cannot call method: ' . $this->getDriverClass() . '::' . $methodName
);
}
}
| 1 | 32,506 | Other calls to getHelpText() are wrapped in an `isset()` check. Would it make sense to be consistent, and either restore that here or remove it elsewhere? | vufind-org-vufind | php |
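A minimal sketch of the isset()-guarded variant the reviewer alludes to (the other call sites are not visible in this row, so the exact guard used elsewhere is an assumption; note that the `?? ''` null coalescing in the code above is behaviorally equivalent to an isset() ternary here):

protected function getHelpText($helpText)
{
    if (is_array($helpText)) {
        $lang = $this->getTranslatorLocale();
        // Explicit isset() guard, matching the style the reviewer describes:
        return isset($helpText[$lang]) ? $helpText[$lang] : '';
    }
    return $helpText;
}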
@@ -17,7 +17,7 @@ module RSpec
# @param [IO] out
def run(err, out)
@configuration.error_stream = err
- @configuration.output_stream ||= out
+ @configuration.output_stream = out
@options.configure(@configuration)
@configuration.load_spec_files
@world.announce_filters | 1 | module RSpec
module Core
class CommandLine
def initialize(options, configuration=RSpec::configuration, world=RSpec::world)
if Array === options
options = ConfigurationOptions.new(options)
options.parse_options
end
@options = options
@configuration = configuration
@world = world
end
# Configures and runs a suite
#
# @param [IO] err
# @param [IO] out
def run(err, out)
@configuration.error_stream = err
@configuration.output_stream ||= out
@options.configure(@configuration)
@configuration.load_spec_files
@world.announce_filters
@configuration.reporter.report(@world.example_count, @configuration.randomize? ? @configuration.seed : nil) do |reporter|
begin
@configuration.run_hook(:before, :suite)
@world.example_groups.ordered.map {|g| g.run(reporter) }.all? ? 0 : @configuration.failure_exit_code
ensure
@configuration.run_hook(:after, :suite)
end
end
end
end
end
end
| 1 | 10,363 | Why the change? If `output_stream` is set to something non-nil, it seems odd (and potentially wrong) to overwrite it.... | rspec-rspec-core | rb |
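A sketch of the distinction the reviewer is drawing (method body abbreviated; only the assignment differs):

def run(err, out)
  @configuration.error_stream = err
  # `||=` assigns only when output_stream is still nil, so a stream the user
  # configured explicitly survives this call; the patched plain `=` would
  # replace it unconditionally with `out`.
  @configuration.output_stream ||= out
  # ...
end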
@@ -4,6 +4,7 @@ class Proposal < ActiveRecord::Base
workflow_column :status
has_one :cart
+ belongs_to :clientdata, polymorphic: true
validates :flow, presence: true, inclusion: {in: ApprovalGroup::FLOWS}
| 1 | class Proposal < ActiveRecord::Base
include ThreeStateWorkflow
workflow_column :status
has_one :cart
validates :flow, presence: true, inclusion: {in: ApprovalGroup::FLOWS}
self.statuses.each do |status|
scope status, -> { where(status: status) }
end
scope :closed, -> { where(status: ['approved', 'rejected']) }
after_initialize :set_defaults
def set_defaults
self.flow ||= 'parallel'
end
#### state machine methods ####
# TODO remove dependence on Cart
def on_pending_entry(prev_state, event)
if self.cart.all_approvals_received?
self.approve!
end
end
def on_rejected_entry(prev_state, event)
if prev_state.name != :rejected
Dispatcher.on_cart_rejected(self.cart)
end
end
def restart
# Note that none of the state machine's history is stored
self.cart.api_tokens.update_all(expires_at: Time.now)
self.cart.approver_approvals.each do |approval|
approval.restart!
end
Dispatcher.deliver_new_cart_emails(self.cart)
end
###############################
end
| 1 | 12,639 | Thinking this should have an underscore. | 18F-C2 | rb |
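A sketch of the snake_case spelling the reviewer is suggesting (the client_data_id/client_data_type column names mentioned below follow Rails' polymorphic defaults and are an assumption, since the migration is not shown):

class Proposal < ActiveRecord::Base
  # Multi-word association names are conventionally snake_case in Rails;
  # this association expects client_data_id and client_data_type columns.
  belongs_to :client_data, polymorphic: true
end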
@@ -316,14 +316,14 @@ def test_message_state_scope(init_linter: PyLinter) -> None:
linter = init_linter
linter.disable("C0202")
- assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope("C0202")
+ assert MSG_STATE_SCOPE_CONFIG == linter._get_message_state_scope("C0202")
linter.disable("W0101", scope="module", line=3)
- assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope("C0202")
- assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope("W0101", 3)
+ assert MSG_STATE_SCOPE_CONFIG == linter._get_message_state_scope("C0202")
+ assert MSG_STATE_SCOPE_MODULE == linter._get_message_state_scope("W0101", 3)
linter.enable("W0102", scope="module", line=3)
- assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope("W0102", 3)
+ assert MSG_STATE_SCOPE_MODULE == linter._get_message_state_scope("W0102", 3)
linter.config = FakeConfig()
- assert MSG_STATE_CONFIDENCE == linter.get_message_state_scope(
+ assert MSG_STATE_CONFIDENCE == linter._get_message_state_scope(
"this-is-bad", confidence=interfaces.INFERENCE
)
| 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Kevin Jing Qiu <kevin.jing.qiu@gmail.com>
# Copyright (c) 2012 Anthony VEREZ <anthony.verez.external@cassidian.com>
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Noam Yorav-Raphael <noamraph@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016-2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Glenn Matthews <glmatthe@cisco.com>
# Copyright (c) 2017-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017 Craig Citro <craigcitro@gmail.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018, 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2018 Matus Valo <matusvalo@users.noreply.github.com>
# Copyright (c) 2018 Scott Worley <scottworley@scottworley.com>
# Copyright (c) 2018 Randall Leeds <randall@bleeds.info>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Reverb C <reverbc@users.noreply.github.com>
# Copyright (c) 2019 Janne Rönkkö <jannero@users.noreply.github.com>
# Copyright (c) 2019 Trevor Bekolay <tbekolay@gmail.com>
# Copyright (c) 2019 Andres Perez Hortal <andresperezcba@gmail.com>
# Copyright (c) 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2020 Martin Vielsmaier <martin@vielsmaier.net>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Michal Vasilek <michal@vasilek.cz>
# Copyright (c) 2021 Eisuke Kawashima <e-kwsm@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# pylint: disable=redefined-outer-name
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from importlib import reload
from io import StringIO
from os import chdir, getcwd
from os.path import abspath, basename, dirname, isdir, join, sep
from shutil import rmtree
from typing import Iterable, Iterator, List, Optional, Tuple
import platformdirs
import pytest
from _pytest.capture import CaptureFixture
from pylint import checkers, config, exceptions, interfaces, lint, testutils
from pylint.checkers.utils import check_messages
from pylint.constants import (
MSG_STATE_CONFIDENCE,
MSG_STATE_SCOPE_CONFIG,
MSG_STATE_SCOPE_MODULE,
)
from pylint.exceptions import InvalidMessageError
from pylint.lint import ArgumentPreprocessingError, PyLinter, Run, preprocess_options
from pylint.message import Message
from pylint.reporters import text
from pylint.typing import MessageLocationTuple
from pylint.utils import FileState, print_full_documentation, tokenize_module
if os.name == "java":
# pylint: disable=no-member
# os._name is valid see https://www.programcreek.com/python/example/3842/os._name
if os.name == "nt":
HOME = "USERPROFILE"
else:
HOME = "HOME"
elif sys.platform == "win32":
HOME = "USERPROFILE"
else:
HOME = "HOME"
@contextmanager
def fake_home() -> Iterator:
folder = tempfile.mkdtemp("fake-home")
old_home = os.environ.get(HOME)
try:
os.environ[HOME] = folder
yield
finally:
os.environ.pop("PYLINTRC", "")
if old_home is None:
del os.environ[HOME]
else:
os.environ[HOME] = old_home
rmtree(folder, ignore_errors=True)
def remove(file):
try:
os.remove(file)
except OSError:
pass
HERE = abspath(dirname(__file__))
INPUT_DIR = join(HERE, "..", "input")
REGRTEST_DATA_DIR = join(HERE, "..", "regrtest_data")
DATA_DIR = join(HERE, "..", "data")
@contextmanager
def tempdir() -> Iterator[str]:
"""Create a temp directory and change the current location to it.
This is supposed to be used with a *with* statement.
"""
tmp = tempfile.mkdtemp()
# Get the real path of the tempfile; otherwise, tests fail on Mac OS X
current_dir = getcwd()
chdir(tmp)
abs_tmp = abspath(".")
try:
yield abs_tmp
finally:
chdir(current_dir)
rmtree(abs_tmp)
def create_files(paths: List[str], chroot: str = ".") -> None:
"""Creates directories and files found in <path>.
:param list paths: list of relative paths to files or directories
:param str chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = join(chroot, path)
filename = basename(path)
# path is a directory path
if filename == "":
dirs.add(path)
# path is a filename path
else:
dirs.add(dirname(path))
files.add(path)
for dirpath in dirs:
if not isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
with open(filepath, "w", encoding="utf-8"):
pass
@pytest.fixture
def fake_path() -> Iterator[Iterable[str]]:
orig = list(sys.path)
fake: Iterable[str] = ["1", "2", "3"]
sys.path[:] = fake
yield fake
sys.path[:] = orig
def test_no_args(fake_path: List[int]) -> None:
with lint.fix_import_path([]):
assert sys.path == fake_path
assert sys.path == fake_path
@pytest.mark.parametrize(
"case", [["a/b/"], ["a/b"], ["a/b/__init__.py"], ["a/"], ["a"]]
)
def test_one_arg(fake_path: List[str], case: List[str]) -> None:
with tempdir() as chroot:
create_files(["a/b/__init__.py"])
expected = [join(chroot, "a")] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.mark.parametrize(
"case",
[
["a/b", "a/c"],
["a/c/", "a/b/"],
["a/b/__init__.py", "a/c/__init__.py"],
["a", "a/c/__init__.py"],
],
)
def test_two_similar_args(fake_path, case):
with tempdir() as chroot:
create_files(["a/b/__init__.py", "a/c/__init__.py"])
expected = [join(chroot, "a")] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.mark.parametrize(
"case",
[
["a/b/c/__init__.py", "a/d/__init__.py", "a/e/f.py"],
["a/b/c", "a", "a/e"],
["a/b/c", "a", "a/b/c", "a/e", "a"],
],
)
def test_more_args(fake_path, case):
with tempdir() as chroot:
create_files(["a/b/c/__init__.py", "a/d/__init__.py", "a/e/f.py"])
expected = [
join(chroot, suffix)
for suffix in (sep.join(("a", "b")), "a", sep.join(("a", "e")))
] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.fixture(scope="module")
def disable():
return ["I"]
@pytest.fixture(scope="module")
def reporter():
return testutils.GenericTestReporter
@pytest.fixture
def init_linter(linter: PyLinter) -> PyLinter:
linter.open()
linter.set_current_module("toto")
linter.file_state = FileState("toto")
return linter
def test_pylint_visit_method_taken_in_account(linter: PyLinter) -> None:
class CustomChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "custom"
msgs = {"W9999": ("", "custom", "")}
@check_messages("custom")
def visit_class(self, _):
pass
linter.register_checker(CustomChecker(linter))
linter.open()
out = StringIO()
linter.set_reporter(text.TextReporter(out))
linter.check(["abc"])
def test_enable_message(init_linter: PyLinter) -> None:
linter = init_linter
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("W0102")
linter.disable("W0101", scope="package")
linter.disable("W0102", scope="module", line=1)
assert not linter.is_message_enabled("W0101")
assert not linter.is_message_enabled("W0102", 1)
linter.set_current_module("tutu")
assert not linter.is_message_enabled("W0101")
assert linter.is_message_enabled("W0102")
linter.enable("W0101", scope="package")
linter.enable("W0102", scope="module", line=1)
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("W0102", 1)
def test_enable_message_category(init_linter: PyLinter) -> None:
linter = init_linter
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("C0202")
linter.disable("W", scope="package")
linter.disable("C", scope="module", line=1)
assert not linter.is_message_enabled("W0101")
assert linter.is_message_enabled("C0202")
assert not linter.is_message_enabled("C0202", line=1)
linter.set_current_module("tutu")
assert not linter.is_message_enabled("W0101")
assert linter.is_message_enabled("C0202")
linter.enable("W", scope="package")
linter.enable("C", scope="module", line=1)
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("C0202")
assert linter.is_message_enabled("C0202", line=1)
def test_message_state_scope(init_linter: PyLinter) -> None:
class FakeConfig:
confidence = ["HIGH"]
linter = init_linter
linter.disable("C0202")
assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope("C0202")
linter.disable("W0101", scope="module", line=3)
assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope("C0202")
assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope("W0101", 3)
linter.enable("W0102", scope="module", line=3)
assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope("W0102", 3)
linter.config = FakeConfig()
assert MSG_STATE_CONFIDENCE == linter.get_message_state_scope(
"this-is-bad", confidence=interfaces.INFERENCE
)
def test_enable_message_block(init_linter: PyLinter) -> None:
linter = init_linter
linter.open()
filepath = join(REGRTEST_DATA_DIR, "func_block_disable_msg.py")
linter.set_current_module("func_block_disable_msg")
astroid = linter.get_ast(filepath, "func_block_disable_msg")
linter.process_tokens(tokenize_module(astroid))
fs = linter.file_state
fs.collect_block_lines(linter.msgs_store, astroid)
# global (module level)
assert linter.is_message_enabled("W0613")
assert linter.is_message_enabled("E1101")
# meth1
assert linter.is_message_enabled("W0613", 13)
# meth2
assert not linter.is_message_enabled("W0613", 18)
# meth3
assert not linter.is_message_enabled("E1101", 24)
assert linter.is_message_enabled("E1101", 26)
# meth4
assert not linter.is_message_enabled("E1101", 32)
assert linter.is_message_enabled("E1101", 36)
# meth5
assert not linter.is_message_enabled("E1101", 42)
assert not linter.is_message_enabled("E1101", 43)
assert linter.is_message_enabled("E1101", 46)
assert not linter.is_message_enabled("E1101", 49)
assert not linter.is_message_enabled("E1101", 51)
# meth6
assert not linter.is_message_enabled("E1101", 57)
assert linter.is_message_enabled("E1101", 61)
assert not linter.is_message_enabled("E1101", 64)
assert not linter.is_message_enabled("E1101", 66)
assert linter.is_message_enabled("E0602", 57)
assert linter.is_message_enabled("E0602", 61)
assert not linter.is_message_enabled("E0602", 62)
assert linter.is_message_enabled("E0602", 64)
assert linter.is_message_enabled("E0602", 66)
# meth7
assert not linter.is_message_enabled("E1101", 70)
assert linter.is_message_enabled("E1101", 72)
assert linter.is_message_enabled("E1101", 75)
assert linter.is_message_enabled("E1101", 77)
fs = linter.file_state
assert fs._suppression_mapping["W0613", 18] == 17
assert fs._suppression_mapping["E1101", 33] == 30
assert ("E1101", 46) not in fs._suppression_mapping
assert fs._suppression_mapping["C0302", 18] == 1
assert fs._suppression_mapping["C0302", 50] == 1
# This is tricky. While the disable in line 106 is disabling
# both 108 and 110, this is usually not what the user wanted.
# Therefore, we report the closest previous disable comment.
assert fs._suppression_mapping["E1101", 108] == 106
assert fs._suppression_mapping["E1101", 110] == 109
def test_enable_by_symbol(init_linter: PyLinter) -> None:
"""messages can be controlled by symbolic names.
The state is consistent across symbols and numbers.
"""
linter = init_linter
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("unreachable")
assert linter.is_message_enabled("W0102")
assert linter.is_message_enabled("dangerous-default-value")
linter.disable("unreachable", scope="package")
linter.disable("dangerous-default-value", scope="module", line=1)
assert not linter.is_message_enabled("W0101")
assert not linter.is_message_enabled("unreachable")
assert not linter.is_message_enabled("W0102", 1)
assert not linter.is_message_enabled("dangerous-default-value", 1)
linter.set_current_module("tutu")
assert not linter.is_message_enabled("W0101")
assert not linter.is_message_enabled("unreachable")
assert linter.is_message_enabled("W0102")
assert linter.is_message_enabled("dangerous-default-value")
linter.enable("unreachable", scope="package")
linter.enable("dangerous-default-value", scope="module", line=1)
assert linter.is_message_enabled("W0101")
assert linter.is_message_enabled("unreachable")
assert linter.is_message_enabled("W0102", 1)
assert linter.is_message_enabled("dangerous-default-value", 1)
def test_enable_report(linter: PyLinter) -> None:
assert linter.report_is_enabled("RP0001")
linter.disable("RP0001")
assert not linter.report_is_enabled("RP0001")
linter.enable("RP0001")
assert linter.report_is_enabled("RP0001")
def test_report_output_format_aliased(linter: PyLinter) -> None:
text.register(linter)
linter.set_option("output-format", "text")
assert linter.reporter.__class__.__name__ == "TextReporter"
def test_set_unsupported_reporter(linter: PyLinter) -> None:
text.register(linter)
with pytest.raises(exceptions.InvalidReporterError):
linter.set_option("output-format", "missing.module.Class")
def test_set_option_1(linter: PyLinter) -> None:
linter.set_option("disable", "C0111,W0234")
assert not linter.is_message_enabled("C0111")
assert not linter.is_message_enabled("W0234")
assert linter.is_message_enabled("W0113")
assert not linter.is_message_enabled("missing-docstring")
assert not linter.is_message_enabled("non-iterator-returned")
def test_set_option_2(linter: PyLinter) -> None:
linter.set_option("disable", ("C0111", "W0234"))
assert not linter.is_message_enabled("C0111")
assert not linter.is_message_enabled("W0234")
assert linter.is_message_enabled("W0113")
assert not linter.is_message_enabled("missing-docstring")
assert not linter.is_message_enabled("non-iterator-returned")
def test_enable_checkers(linter: PyLinter) -> None:
linter.disable("design")
assert not ("design" in [c.name for c in linter.prepare_checkers()])
linter.enable("design")
assert "design" in [c.name for c in linter.prepare_checkers()]
def test_errors_only(linter: PyLinter) -> None:
linter.error_mode()
checkers = linter.prepare_checkers()
checker_names = {c.name for c in checkers}
should_not = {"design", "format", "metrics", "miscellaneous", "similarities"}
assert set() == should_not & checker_names
def test_disable_similar(linter: PyLinter) -> None:
linter.set_option("disable", "RP0801")
linter.set_option("disable", "R0801")
assert not ("similarities" in [c.name for c in linter.prepare_checkers()])
def test_disable_alot(linter: PyLinter) -> None:
"""check that we disabled a lot of checkers"""
linter.set_option("reports", False)
linter.set_option("disable", "R,C,W")
checker_names = [c.name for c in linter.prepare_checkers()]
for cname in ("design", "metrics", "similarities"):
assert not (cname in checker_names), cname
def test_addmessage(linter: PyLinter) -> None:
linter.set_reporter(testutils.GenericTestReporter())
linter.open()
linter.set_current_module("0123")
linter.add_message("C0301", line=1, args=(1, 2))
linter.add_message("line-too-long", line=2, args=(3, 4))
assert len(linter.reporter.messages) == 2
assert linter.reporter.messages[0] == Message(
msg_id="C0301",
symbol="line-too-long",
msg="Line too long (1/2)",
confidence=interfaces.Confidence(
name="UNDEFINED",
description="Warning without any associated confidence level.",
),
location=MessageLocationTuple(
abspath="0123", path="0123", module="0123", obj="", line=1, column=0
),
)
assert linter.reporter.messages[1] == Message(
msg_id="C0301",
symbol="line-too-long",
msg="Line too long (3/4)",
confidence=interfaces.Confidence(
name="UNDEFINED",
description="Warning without any associated confidence level.",
),
location=MessageLocationTuple(
abspath="0123", path="0123", module="0123", obj="", line=2, column=0
),
)
def test_addmessage_invalid(linter: PyLinter) -> None:
linter.set_reporter(testutils.GenericTestReporter())
linter.open()
linter.set_current_module("0123")
with pytest.raises(InvalidMessageError) as cm:
linter.add_message("line-too-long", args=(1, 2))
assert str(cm.value) == "Message C0301 must provide line, got None"
with pytest.raises(InvalidMessageError) as cm:
linter.add_message("line-too-long", line=2, node="fake_node", args=(1, 2))
assert (
str(cm.value)
== "Message C0301 must only provide line, got line=2, node=fake_node"
)
with pytest.raises(InvalidMessageError) as cm:
linter.add_message("C0321")
assert str(cm.value) == "Message C0321 must provide Node, got None"
def test_load_plugin_command_line() -> None:
dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
sys.path.append(dummy_plugin_path)
run = Run(
["--load-plugins", "dummy_plugin", join(REGRTEST_DATA_DIR, "empty.py")],
exit=False,
)
assert (
len([ch.name for ch in run.linter.get_checkers() if ch.name == "dummy_plugin"])
== 2
)
sys.path.remove(dummy_plugin_path)
def test_load_plugin_config_file() -> None:
dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
sys.path.append(dummy_plugin_path)
config_path = join(REGRTEST_DATA_DIR, "dummy_plugin.rc")
run = Run(
["--rcfile", config_path, join(REGRTEST_DATA_DIR, "empty.py")],
exit=False,
)
assert (
len([ch.name for ch in run.linter.get_checkers() if ch.name == "dummy_plugin"])
== 2
)
sys.path.remove(dummy_plugin_path)
def test_load_plugin_configuration() -> None:
dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
sys.path.append(dummy_plugin_path)
run = Run(
[
"--load-plugins",
"dummy_conf_plugin",
"--ignore",
"foo,bar",
join(REGRTEST_DATA_DIR, "empty.py"),
],
exit=False,
)
assert run.linter.config.black_list == ["foo", "bar", "bin"]
def test_init_hooks_called_before_load_plugins() -> None:
with pytest.raises(RuntimeError):
Run(["--load-plugins", "unexistant", "--init-hook", "raise RuntimeError"])
with pytest.raises(RuntimeError):
Run(["--init-hook", "raise RuntimeError", "--load-plugins", "unexistant"])
def test_analyze_explicit_script(linter: PyLinter) -> None:
linter.set_reporter(testutils.GenericTestReporter())
linter.check([os.path.join(DATA_DIR, "ascript")])
assert len(linter.reporter.messages) == 1
assert linter.reporter.messages[0] == Message(
msg_id="C0301",
symbol="line-too-long",
msg="Line too long (175/100)",
confidence=interfaces.Confidence(
name="UNDEFINED",
description="Warning without any associated confidence level.",
),
location=MessageLocationTuple(
abspath=os.path.join(abspath(dirname(__file__)), "ascript").replace(
f"lint{os.path.sep}ascript", f"data{os.path.sep}ascript"
),
path=f"tests{os.path.sep}data{os.path.sep}ascript",
module="data.ascript",
obj="",
line=2,
column=0,
),
)
def test_full_documentation(linter: PyLinter) -> None:
out = StringIO()
print_full_documentation(linter, out)
output = out.getvalue()
# A few spot checks only
for re_str in (
# autogenerated text
"^Pylint global options and switches$",
"Verbatim name of the checker is ``variables``",
# messages
"^:undefined-loop-variable \\(W0631\\): *",
# options
"^:dummy-variables-rgx:",
):
regexp = re.compile(re_str, re.MULTILINE)
assert re.search(regexp, output)
def test_list_msgs_enabled(init_linter: PyLinter, capsys: CaptureFixture) -> None:
linter = init_linter
linter.enable("W0101", scope="package")
linter.disable("W0102", scope="package")
linter.list_messages_enabled()
lines = capsys.readouterr().out.splitlines()
assert "Enabled messages:" in lines
assert " unreachable (W0101)" in lines
assert "Disabled messages:" in lines
disabled_ix = lines.index("Disabled messages:")
# W0101 should be in the enabled section
assert lines.index(" unreachable (W0101)") < disabled_ix
assert " dangerous-default-value (W0102)" in lines
# W0102 should be in the disabled section
assert lines.index(" dangerous-default-value (W0102)") > disabled_ix
@pytest.fixture
def pop_pylintrc() -> None:
os.environ.pop("PYLINTRC", None)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylint_home() -> None:
uhome = os.path.expanduser("~")
if uhome == "~":
expected = ".pylint.d"
else:
expected = platformdirs.user_cache_dir("pylint")
assert config.PYLINT_HOME == expected
try:
pylintd = join(tempfile.gettempdir(), ".pylint.d")
os.environ["PYLINTHOME"] = pylintd
try:
reload(config)
assert config.PYLINT_HOME == pylintd
finally:
try:
rmtree(pylintd)
except FileNotFoundError:
pass
finally:
del os.environ["PYLINTHOME"]
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc() -> None:
with fake_home():
current_dir = getcwd()
chdir(os.path.dirname(os.path.abspath(sys.executable)))
try:
assert config.find_pylintrc() is None
os.environ["PYLINTRC"] = join(tempfile.gettempdir(), ".pylintrc")
assert config.find_pylintrc() is None
os.environ["PYLINTRC"] = "."
assert config.find_pylintrc() is None
finally:
chdir(current_dir)
reload(config)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir() -> None:
with tempdir() as chroot:
create_files(
[
"a/pylintrc",
"a/b/__init__.py",
"a/b/pylintrc",
"a/b/c/__init__.py",
"a/b/c/d/__init__.py",
"a/b/c/d/e/.pylintrc",
]
)
with fake_home():
assert config.find_pylintrc() is None
results = {
"a": join(chroot, "a", "pylintrc"),
"a/b": join(chroot, "a", "b", "pylintrc"),
"a/b/c": join(chroot, "a", "b", "pylintrc"),
"a/b/c/d": join(chroot, "a", "b", "pylintrc"),
"a/b/c/d/e": join(chroot, "a", "b", "c", "d", "e", ".pylintrc"),
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
assert config.find_pylintrc() == expected
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir_no_package() -> None:
with tempdir() as chroot:
with fake_home():
create_files(["a/pylintrc", "a/b/pylintrc", "a/b/c/d/__init__.py"])
assert config.find_pylintrc() is None
results = {
"a": join(chroot, "a", "pylintrc"),
"a/b": join(chroot, "a", "b", "pylintrc"),
"a/b/c": None,
"a/b/c/d": None,
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
assert config.find_pylintrc() == expected
class TestPreprocessOptions:
def _callback(self, name: str, value: Optional[str]) -> None:
self.args.append((name, value))
def test_value_equal(self) -> None:
self.args: List[Tuple[str, Optional[str]]] = []
preprocess_options(
["--foo", "--bar=baz", "--qu=ux"],
{"foo": (self._callback, False), "qu": (self._callback, True)},
)
assert [("foo", None), ("qu", "ux")] == self.args
def test_value_space(self) -> None:
self.args = []
preprocess_options(["--qu", "ux"], {"qu": (self._callback, True)})
assert [("qu", "ux")] == self.args
@staticmethod
def test_error_missing_expected_value() -> None:
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(["--foo", "--bar", "--qu=ux"], {"bar": (None, True)})
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(["--foo", "--bar"], {"bar": (None, True)})
@staticmethod
def test_error_unexpected_value() -> None:
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(
["--foo", "--bar=spam", "--qu=ux"], {"bar": (None, False)}
)
class _CustomPyLinter(PyLinter):
# pylint: disable=too-many-ancestors
@staticmethod
def should_analyze_file(modname: str, path: str, is_argument: bool = False) -> bool:
if os.path.basename(path) == "wrong.py":
return False
return super(_CustomPyLinter, _CustomPyLinter).should_analyze_file(
modname, path, is_argument=is_argument
)
def test_custom_should_analyze_file() -> None:
"""Check that we can write custom should_analyze_file that work
even for arguments.
"""
package_dir = os.path.join(REGRTEST_DATA_DIR, "bad_package")
wrong_file = os.path.join(package_dir, "wrong.py")
for jobs in (1, 2):
reporter = testutils.GenericTestReporter()
linter = _CustomPyLinter()
linter.config.jobs = jobs
linter.config.persistent = 0
linter.open()
linter.set_reporter(reporter)
try:
sys.path.append(os.path.dirname(package_dir))
linter.check([package_dir, wrong_file])
finally:
sys.path.pop()
messages = reporter.messages
assert len(messages) == 1
assert "invalid syntax" in messages[0].msg
# we also run the check with jobs=1, so that we can be sure any duplicates
# really are created by the multiprocessing problem.
@pytest.mark.parametrize("jobs", [1, 2])
def test_multiprocessing(jobs: int) -> None:
"""Check that multiprocessing does not create duplicates."""
# For the bug (#3584) to show up we need more than one file with issues
# per process
filenames = [
"special_attr_scope_lookup_crash.py",
"syntax_error.py",
"unused_variable.py",
"wildcard.py",
"wrong_import_position.py",
]
reporter = testutils.GenericTestReporter()
linter = PyLinter()
linter.config.jobs = jobs
linter.config.persistent = 0
linter.open()
linter.set_reporter(reporter)
try:
sys.path.append(os.path.dirname(REGRTEST_DATA_DIR))
linter.check([os.path.join(REGRTEST_DATA_DIR, fname) for fname in filenames])
finally:
sys.path.pop()
messages = reporter.messages
assert len(messages) == len(set(messages))
def test_filename_with__init__(init_linter: PyLinter) -> None:
# This tracks a regression where a file whose name ends in __init__.py,
# such as flycheck__init__.py, would accidentally lead to linting the
# entire containing directory.
reporter = testutils.GenericTestReporter()
linter = init_linter
linter.open()
linter.set_reporter(reporter)
filepath = join(INPUT_DIR, "not__init__.py")
linter.check([filepath])
messages = reporter.messages
assert len(messages) == 0
def test_by_module_statement_value(init_linter: PyLinter) -> None:
"""Test that "statement" is computed correctly for each module analyzed."""
linter = init_linter
linter.check([os.path.join(os.path.dirname(__file__), "data")])
by_module_stats = linter.stats.by_module
for module, module_stats in by_module_stats.items():
linter2 = init_linter
if module == "data":
linter2.check([os.path.join(os.path.dirname(__file__), "data/__init__.py")])
else:
linter2.check([os.path.join(os.path.dirname(__file__), module)])
# Check that the by_module "statement" is equal to the global "statement"
# computed for that module
assert module_stats["statement"] == linter2.stats.statement
| 1 | 16,495 | To accommodate the change to the method being private. | PyCQA-pylint | py |
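The underscore prefix is Python's convention for internal API, so the tests above simply track the rename (a sketch; the signature is unchanged):

# Before the rename: public accessor
scope = linter.get_message_state_scope("C0202")
# After the rename: private by convention, still callable from tests
scope = linter._get_message_state_scope("C0202")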
@@ -90,6 +90,7 @@ namespace AutoRest.Swagger.Model
[CollectionRule(typeof(BodyTopLevelProperties))]
[CollectionRule(typeof(HttpVerbValidation))]
[CollectionRule(typeof(DeleteMustHaveEmptyRequestBody))]
+ [CollectionRule(typeof(PropertiesNamesCamelCase))]
public Dictionary<string, Dictionary<string, Operation>> Paths { get; set; }
/// <summary> | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Linq;
using System.Collections.Generic;
using System.Globalization;
using Newtonsoft.Json;
using AutoRest.Core.Validation;
using AutoRest.Core.Logging;
using AutoRest.Core.Utilities.Collections;
using AutoRest.Swagger.Validation;
using System.Text.RegularExpressions;
namespace AutoRest.Swagger.Model
{
/// <summary>
/// Class that represents Swagger 2.0 schema
/// http://json.schemastore.org/swagger-2.0
/// Swagger Object - https://github.com/wordnik/swagger-spec/blob/master/versions/2.0.md#swagger-object-
/// </summary>
public class ServiceDefinition : SpecObject
{
// as long as AAAS does not provide that
public static ServiceDefinition Instance { get; set; }
public ServiceDefinition()
{
Definitions = new Dictionary<string, Schema>();
Schemes = new List<TransferProtocolScheme>();
Consumes = new List<string>();
Produces = new List<string>();
Paths = new Dictionary<string, Dictionary<string, Operation>>();
CustomPaths = new Dictionary<string, Dictionary<string, Operation>>();
Parameters = new Dictionary<string, SwaggerParameter>();
Responses = new Dictionary<string, OperationResponse>();
SecurityDefinitions = new Dictionary<string, SecurityDefinition>();
Security = new List<Dictionary<string, List<string>>>();
Tags = new List<Tag>();
}
/// <summary>
/// Specifies the Swagger Specification version being used.
/// </summary>
public string Swagger { get; set; }
/// <summary>
/// Provides metadata about the API. The metadata can be used by the clients if needed.
/// </summary>
public Info Info { get; set; }
/// <summary>
/// The host (serviceTypeName or ip) serving the API.
/// </summary>
public string Host { get; set; }
/// <summary>
/// The base path on which the API is served, which is relative to the host.
/// </summary>
public string BasePath { get; set; }
/// <summary>
/// The transfer protocol of the API.
/// </summary>
[CollectionRule(typeof(SupportedSchemesWarning))]
public IList<TransferProtocolScheme> Schemes { get; set; }
/// <summary>
/// A list of MIME types the service can consume.
/// </summary>
[CollectionRule(typeof(NonAppJsonTypeWarning))]
public IList<string> Consumes { get; set; }
/// <summary>
/// A list of MIME types the APIs can produce.
/// </summary>
[CollectionRule(typeof(NonAppJsonTypeWarning))]
public IList<string> Produces { get; set; }
/// <summary>
/// Key is actual path and the value is serializationProperty of http operations and operation objects.
/// </summary>
[Rule(typeof(UniqueResourcePaths))]
[Rule(typeof(ListOperationNamingWarning))]
[Rule(typeof(ListByOperationsValidation))]
[Rule(typeof(CollectionObjectPropertiesNamingValidation))]
[Rule(typeof(PutGetPatchResponseValidation))]
[Rule(typeof(OperationsAPIImplementationValidation))]
[Rule(typeof(ProvidersPathValidation))]
[CollectionRule(typeof(BodyTopLevelProperties))]
[CollectionRule(typeof(HttpVerbValidation))]
[CollectionRule(typeof(DeleteMustHaveEmptyRequestBody))]
public Dictionary<string, Dictionary<string, Operation>> Paths { get; set; }
/// <summary>
/// Key is actual path and the value is serializationProperty of http operations and operation objects.
/// </summary>
[JsonProperty("x-ms-paths")]
[Rule(typeof(ListOperationNamingWarning))]
[Rule(typeof(CollectionObjectPropertiesNamingValidation))]
[Rule(typeof(ProvidersPathValidation))]
[CollectionRule(typeof(XmsPathsMustOverloadPaths))]
public Dictionary<string, Dictionary<string, Operation>> CustomPaths { get; set; }
/// <summary>
/// Key is the object serviceTypeName and the value is swagger definition.
/// </summary>
[Rule(typeof(BooleanPropertyNotRecommended))]
[Rule(typeof(ResourceModelValidation))]
[Rule(typeof(TrackedResourceValidation))]
[Rule(typeof(TrackedResourcePatchOperationValidation))]
[Rule(typeof(TrackedResourceGetOperationValidation))]
[Rule(typeof(ResourceIsMsResourceValidation))]
[Rule(typeof(GuidValidation))]
[Rule(typeof(SkuModelValidation))]
public Dictionary<string, Schema> Definitions { get; set; }
/// <summary>
/// Dictionary of parameters that can be used across operations.
/// This property does not define global parameters for all operations.
/// </summary>
[Rule(typeof(ServiceDefinitionParameters))]
[CollectionRule(typeof(AnonymousParameterTypes))]
public Dictionary<string, SwaggerParameter> Parameters { get; set; }
/// <summary>
/// Dictionary of responses that can be used across operations. The key indicates status code.
/// </summary>
public Dictionary<string, OperationResponse> Responses { get; set; }
/// <summary>
/// Key is the object serviceTypeName and the value is swagger security definition.
/// </summary>
public Dictionary<string, SecurityDefinition> SecurityDefinitions { get; set; }
/// <summary>
/// A declaration of which security schemes are applied for the API as a whole.
/// The list of values describes alternative security schemes that can be used
/// (that is, there is a logical OR between the security requirements). Individual
/// operations can override this definition.
/// </summary>
public IList<Dictionary<string, List<string>>> Security { get; set; }
/// <summary>
/// A list of tags used by the specification with additional metadata. The order
/// of the tags can be used to reflect on their order by the parsing tools. Not all
/// tags that are used by the Operation Object must be declared. The tags that are
/// not declared may be organized randomly or based on the tools' logic. Each
/// tag name in the list MUST be unique.
/// </summary>
public IList<Tag> Tags { get; set; }
/// <summary>
/// Additional external documentation
/// </summary>
public ExternalDoc ExternalDocs { get; set; }
/// <summary>
/// Path to this Swagger.
/// </summary>
internal Uri FilePath { get; set; }
/// <summary>
/// Compare a modified document node (this) to a previous one and look for breaking as well as non-breaking changes.
/// </summary>
/// <param name="context">The modified document context.</param>
/// <param name="previous">The original document model.</param>
/// <returns>A list of messages from the comparison.</returns>
public override IEnumerable<ComparisonMessage> Compare(ComparisonContext context, SwaggerBase previous)
{
if (previous == null)
throw new ArgumentNullException("previous");
context.CurrentRoot = this;
context.PreviousRoot = previous;
base.Compare(context, previous);
var previousDefinition = previous as ServiceDefinition;
if (previousDefinition == null)
throw new ArgumentException("Comparing a service definition with something else.");
if (Info != null && previousDefinition.Info != null)
{
context.PushProperty("info");
context.PushProperty("version");
CompareVersions(context, Info.Version, previousDefinition.Info.Version);
context.Pop();
context.Pop();
}
if (context.Strict)
{
// There was no version change between the documents. This is not an error, but noteworthy.
context.LogInfo(ComparisonMessages.NoVersionChange);
}
// Check that all the protocols of the old version are supported by the new version.
context.PushProperty("schemes");
foreach (var scheme in previousDefinition.Schemes)
{
if (!Schemes.Contains(scheme))
{
context.LogBreakingChange(ComparisonMessages.ProtocolNoLongerSupported, scheme);
}
}
context.Pop();
// Check that all the request body formats that were accepted still are.
context.PushProperty("consumes");
foreach (var format in previousDefinition.Consumes)
{
if (!Consumes.Contains(format))
{
context.LogBreakingChange(ComparisonMessages.RequestBodyFormatNoLongerSupported, format);
}
}
context.Pop();
// Check that all the response body formats were also supported by the old version.
context.PushProperty("produces");
foreach (var format in Produces)
{
if (!previousDefinition.Produces.Contains(format))
{
context.LogBreakingChange(ComparisonMessages.ResponseBodyFormatNowSupported, format);
}
}
context.Pop();
// Check that no paths were removed, and compare the paths that are still there.
var newPaths = RemovePathVariables(Paths);
context.PushProperty("paths");
foreach (var path in previousDefinition.Paths.Keys)
{
var p = Regex.Replace(path, @"\{\w*\}", @"{}");
context.PushProperty(path);
Dictionary<string, Operation> operations = null;
if (!newPaths.TryGetValue(p, out operations))
{
context.LogBreakingChange(ComparisonMessages.RemovedPath, path);
}
else
{
Dictionary<string, Operation> previousOperations = previousDefinition.Paths[path];
foreach (var previousOperation in previousOperations)
{
Operation newOperation = null;
if (!operations.TryGetValue(previousOperation.Key, out newOperation))
{
context.LogBreakingChange(ComparisonMessages.RemovedOperation, previousOperation.Value.OperationId);
}
}
foreach (var operation in operations)
{
Operation previousOperation = null;
if (previousDefinition.Paths[path].TryGetValue(operation.Key, out previousOperation))
{
context.PushProperty(operation.Key);
operation.Value.Compare(context, previousOperation);
context.Pop();
}
}
}
context.Pop();
}
context.Pop();
newPaths = RemovePathVariables(CustomPaths);
context.PushProperty("x-ms-paths");
foreach (var path in previousDefinition.CustomPaths.Keys)
{
var p = Regex.Replace(path, @"\{\w*\}", @"{}");
context.PushProperty(path);
Dictionary<string, Operation> operations = null;
if (!newPaths.TryGetValue(p, out operations))
{
context.LogBreakingChange(ComparisonMessages.RemovedPath, path);
}
else
{
Dictionary<string, Operation> previousOperations = previousDefinition.CustomPaths[path];
foreach (var previousOperation in previousOperations)
{
Operation newOperation = null;
if (!operations.TryGetValue(previousOperation.Key, out newOperation))
{
context.LogBreakingChange(ComparisonMessages.RemovedOperation, previousOperation.Value.OperationId);
}
}
foreach (var operation in operations)
{
Operation previousOperation = null;
if (previousDefinition.CustomPaths[path].TryGetValue(operation.Key, out previousOperation))
{
context.PushProperty(operation.Key);
operation.Value.Compare(context, previousOperation);
context.Pop();
}
}
}
context.Pop();
}
context.Pop();
ReferenceTrackSchemas(this);
ReferenceTrackSchemas(previousDefinition);
context.PushProperty("parameters");
foreach (var def in previousDefinition.Parameters.Keys)
{
SwaggerParameter parameter = null;
if (!Parameters.TryGetValue(def, out parameter))
{
context.LogBreakingChange(ComparisonMessages.RemovedClientParameter, def);
}
else
{
context.PushProperty(def);
parameter.Compare(context, previousDefinition.Parameters[def]);
context.Pop();
}
}
context.Pop();
context.PushProperty("responses");
foreach (var def in previousDefinition.Responses.Keys)
{
OperationResponse response = null;
if (!Responses.TryGetValue(def, out response))
{
context.LogBreakingChange(ComparisonMessages.RemovedDefinition, def);
}
else
{
context.PushProperty(def);
response.Compare(context, previousDefinition.Responses[def]);
context.Pop();
}
}
context.Pop();
context.PushProperty("definitions");
foreach (var def in previousDefinition.Definitions.Keys)
{
Schema schema = null;
Schema oldSchema = previousDefinition.Definitions[def];
if (!Definitions.TryGetValue(def, out schema))
{
if (oldSchema.IsReferenced)
// It's only an error if the definition is referenced in the old service.
context.LogBreakingChange(ComparisonMessages.RemovedDefinition, def);
}
else if (schema.IsReferenced && oldSchema.IsReferenced)
{
context.PushProperty(def);
schema.Compare(context, previousDefinition.Definitions[def]);
context.Pop();
}
}
context.Pop();
context.Pop();
return context.Messages;
}
/// <summary>
/// Since renaming a path parameter doesn't logically alter the path, we must remove the parameter names
/// before comparing paths using string comparison.
/// </summary>
/// <param name="paths">A dictionary of paths, potentially with embedded parameter names.</param>
/// <returns>A transformed dictionary, where paths do not embed parameter names.</returns>
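/// <example>"/providers/{rpName}/items/{itemId}" becomes "/providers/{}/items/{}".</example>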
private Dictionary<string, Dictionary<string, Operation>> RemovePathVariables(Dictionary<string, Dictionary<string, Operation>> paths)
{
var result = new Dictionary<string, Dictionary<string, Operation>>();
foreach (var kv in paths)
{
var p = Regex.Replace(kv.Key, @"\{\w*\}", @"{}");
result[p] = kv.Value;
}
return result;
}
/// <summary>
/// Since some services may rely on semantic versioning, comparing versions is fairly complex.
/// </summary>
/// <param name="context">A comparison context.</param>
/// <param name="newVer">The new version string.</param>
/// <param name="oldVer">The old version string</param>
/// <remarks>
/// In semantic versioning schemes, only the major and minor version numbers are considered when comparing versions.
/// Build numbers are ignored.
/// </remarks>
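/// <example>
/// Illustrative (assumed) cases: "1.2.3" vs "1.2.4" is not a version change, since only
/// the build number differs; "1.2" vs "1.3" is a change; moving from "2.0" to "1.9"
/// additionally logs VersionsReversed; "2016-03-01" vs "2016-06-01" has no integer
/// components and falls through to the case-insensitive string comparison.
/// </example>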
private void CompareVersions(ComparisonContext context, string newVer, string oldVer)
{
var oldVersion = oldVer.Split('.');
var newVersion = newVer.Split('.');
// If the version consists only of numbers separated by '.', we'll consider it semantic versioning.
if (!context.Strict && oldVersion.Length > 0 && newVersion.Length > 0)
{
bool versionChanged = false;
// Situation 1: The versioning scheme is semantic, i.e. it uses a major.minor.build-number scheme, where each component is an integer.
// In this case, we care about the major/minor numbers, but not the build number. In other words, if all that is different
// is the build number, it will not be treated as a version change.
int oldMajor = 0, newMajor = 0;
bool integers = int.TryParse(oldVersion[0], out oldMajor) && int.TryParse(newVersion[0], out newMajor);
if (integers && oldMajor != newMajor)
{
versionChanged = true;
if (oldMajor > newMajor)
{
context.LogError(ComparisonMessages.VersionsReversed, oldVer, newVer);
}
}
if (!versionChanged && integers && oldVersion.Length > 1 && newVersion.Length > 1)
{
int oldMinor = 0, newMinor = 0;
integers = int.TryParse(oldVersion[1], out oldMinor) && int.TryParse(newVersion[1], out newMinor);
if (integers && oldMinor != newMinor)
{
versionChanged = true;
if (oldMinor > newMinor)
{
context.LogError(ComparisonMessages.VersionsReversed, oldVer, newVer);
}
}
}
// Situation 2: The versioning scheme is something else, maybe a date or just a label?
// Regardless of what it is, we just check whether the two strings are equal or not.
if (!versionChanged && !integers)
{
versionChanged = !oldVer.ToLower().Equals(newVer.ToLower());
}
context.Strict = !versionChanged;
}
}
/// <summary>
/// In order to avoid comparing definitions (schemas) that are not used, we go through all references that are
/// found in operations, global parameters, and global responses. Definitions that are referenced from other
/// definitions are included only by transitive closure.
/// </summary>
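// Illustrative (assumed) walk-through: if an operation parameter references schema A,
// and a property of A references schema B, the closure loop below marks both A and B
// as referenced; a definition C that nothing reaches stays unmarked, so Compare()
// skips it.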
private static void ReferenceTrackSchemas(ServiceDefinition service)
{
foreach (var schema in service.Definitions.Values)
{
schema.IsReferenced = false;
}
foreach (var path in service.Paths.Values)
{
foreach (var operation in path.Values)
{
foreach (var parameter in operation.Parameters)
{
if (parameter.Schema != null && !string.IsNullOrWhiteSpace(parameter.Schema.Reference))
{
var schema = FindReferencedSchema(parameter.Schema.Reference, service.Definitions);
schema.IsReferenced = true;
}
}
}
}
foreach (var path in service.CustomPaths.Values)
{
foreach (var operation in path.Values)
{
foreach (var parameter in operation.Parameters)
{
if (parameter.Schema != null && !string.IsNullOrWhiteSpace(parameter.Schema.Reference))
{
var schema = FindReferencedSchema(parameter.Schema.Reference, service.Definitions);
schema.IsReferenced = true;
}
}
}
}
foreach (var parameter in service.Parameters.Values)
{
if (parameter.Schema != null && !string.IsNullOrWhiteSpace(parameter.Schema.Reference))
{
var schema = FindReferencedSchema(parameter.Schema.Reference, service.Definitions);
schema.IsReferenced = true;
}
}
foreach (var response in service.Responses.Values)
{
if (response.Schema != null && !string.IsNullOrWhiteSpace(response.Schema.Reference))
{
var schema = FindReferencedSchema(response.Schema.Reference, service.Definitions);
schema.IsReferenced = true;
}
}
var changed = true;
while (changed)
{
changed = false;
foreach (var schema in service.Definitions.Values.Where(d => d.IsReferenced))
{
foreach (var property in schema.Properties.Values)
{
if (!string.IsNullOrWhiteSpace(property.Reference))
{
var s = FindReferencedSchema(property.Reference, service.Definitions);
changed = changed || !s.IsReferenced;
s.IsReferenced = true;
}
}
}
}
}
/// <summary>
/// Retrieve a schema from the definitions section.
/// </summary>
/// <param name="reference">A document-relative reference path -- #/definitions/XXX</param>
/// <param name="definitions">The definitions dictionary to use</param>
/// <returns></returns>
private static Schema FindReferencedSchema(string reference, IDictionary<string, Schema> definitions)
{
if (reference != null && reference.StartsWith("#", StringComparison.Ordinal))
{
var parts = reference.Split('/');
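// A document-local reference splits into exactly three parts: "#", "definitions", and the schema name.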
if (parts.Length == 3 && parts[1].Equals("definitions"))
{
Schema p = null;
if (definitions.TryGetValue(parts[2], out p))
{
return p;
}
}
}
return null;
}
}
} | 1 | 23,969 | Shouldn't we traverse all definitions and apply this rule over all model definitions in the doc | Azure-autorest | java |
@@ -0,0 +1,11 @@
+/**
+ * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
+ */
+
+package net.sourceforge.pmd.lang.java.rule.design;
+
+import net.sourceforge.pmd.testframework.PmdRuleTst;
+
+public class MutableStaticState extends PmdRuleTst {
+ // no additional unit tests
+} | 1 | 1 | 18,566 | Please rename this class that it ends with the suffix "Test" - otherwise the tests won't be executed. | pmd-pmd | java |
|
@@ -0,0 +1,7 @@
+// This file is here, in part, so that users whose code editors are
+// set to automatically format files with Prettier have a config to detect.
+// Many users only run Prettier when a config is present, so this file makes
+// sure one can be detected, even though we aren't doing anything with it.
+module.exports = {
+ ...require('@wordpress/prettier-config'),
+} | 1 | 1 | 40,252 | As a JS file, this should also receive our standard file header. | google-site-kit-wp | js |
|
@@ -1784,7 +1784,9 @@ short HashJoin::codeGen(Generator * generator) {
UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
double memQuota = 0;
-
+ double memQuotaRatio;
+ Lng32 numStreams;
+ double bmoMemoryUsagePerNode = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
if (mmu != 0) {
memQuota = mmu;
hashj_tdb->setMemoryQuotaMB(mmu); | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: GenRelJoin.C
* Description: Join operators
*
*
* Created: 5/17/94
* Language: C++
*
*
*
*
******************************************************************************
*/
#include "ComDefs.h" // to get common defines (ROUND8)
#include "limits.h"
#include "ComOptIncludes.h"
#include "RelJoin.h"
#include "GroupAttr.h"
#include "Analyzer.h"
#include "Generator.h"
#include "GenExpGenerator.h"
//#include "ex_stdh.h"
#include "ExpCriDesc.h"
#include "ComTdb.h"
//#include "ex_tcb.h"
#include "ComTdbOnlj.h"
#include "ComTdbHashj.h"
#include "ComTdbMj.h"
#include "ComTdbTupleFlow.h"
#include "HashBufferHeader.h"
#if 0
// unused feature, done as part of SQ SQL code cleanup effort
#include "ComTdbSimpleSample.h"
#endif // if 0
#include "DefaultConstants.h"
#include "HashRow.h"
#include "hash_table.h" // for HashTableHeader
#include "ExpSqlTupp.h" // for sizeof(tupp_descriptor)
#include "sql_buffer.h"
#include "sql_buffer_size.h"
#include "CmpStatement.h"
#include "ComUnits.h"
/////////////////////////////////////////////////////////////////////
//
// Contents:
//
// HashJoin::codeGen()
// MergeJoin::codeGen()
// NestedJoin::codeGen()
// NestedJoinFlow::codeGen()
// Join::instantiateValuesForLeftJoin()
//
/////////////////////////////////////////////////////////////////////
short HashJoin::codeGen(Generator * generator) {
// Decide if this join can use the Unique Hash Join option. This
// option can be significantly faster than the regular hash join,
// but does not support many features.
//
NABoolean useUniqueHashJoin = canUseUniqueHashJoin();
ExpGenerator * expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
GenAssert( ! isSemiJoin() || ! isAntiSemiJoin(),
"Node can not be both semi-join and anti-semi-join" );
GenAssert( ! isReuse() || isNoOverflow(),
"Reuse of inner table requires no-overflow!" );
// set flag to enable pcode for indirect varchar
NABoolean vcflag = expGen->handleIndirectVC();
if (CmpCommon::getDefault(VARCHAR_PCODE) == DF_ON) {
expGen->setHandleIndirectVC( TRUE );
}
// values are returned from the right when it is not (anti) semi join
NABoolean rightOutputNeeded = ! isSemiJoin() && ! isAntiSemiJoin() ;
// the minMaxExpr is used when the min max optimization is in
// effect. It is evaluated at the same time as the rightHashExpr
// and computes the min and max values for one or more of the join
// values coming from the right (inner) side.
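// (For example, with an equijoin predicate t1.a = t2.b, the inner side
// computes MIN(t2.b)/MAX(t2.b) while the hash table is built, so the scan
// on the outer side can use them as an additional range filter on t1.a.)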
ex_expr * minMaxExpr = 0;
// the rightHashExpr takes a row from the right child (the "build
// table") and calculates the hash value. After the expression is
// evaluated, the hash value is available in the hashValueTupp_ of
// the hash join tcb (compare the description of the workAtp).
ex_expr * rightHashExpr = 0;
// the rightMoveInExpr expression moves the incoming right row
// to a contiguous buffer. This buffer is later inserted into the
// hash table. The row in the buffer looks like a right row with a
// header added to it. Thus, it is called "extended" right row. The
// row header contains the hash value and a pointer for the hash
// chain.
ex_expr * rightMoveInExpr = 0;
// the rightMoveOutExpr moves an "extended" right row to a regular right
// row (one without a row header). This is done if the "extended" right
// row is part of a result.
// This expression is not really required. The "extended" row in the
// hash buffer is always contiguous. So is the row in the result buffer.
// Thus, the executor can simply use a byte move. This is true, if the
// "extended" row in the hash buffer has the following format:
//
// |------------------------------------------------------|
// | Header | right columns | hash expression (see below) |
// |------------------------------------------------------|
//
// A byte move just skips the header (hash value & next pointer) and
// hash expression. It only moves the right columns. The following code
// guarantees that the hash expression always follows the right row
// data.
//
// Please note that for now the generator still generates the
// rightMoveOutExpr. It is simply not used by the executor.
ex_expr * rightMoveOutExpr = 0;
// the rightSearchExpr compares two "extended" rows from the right child.
// Both rows are stored in the contiguous buffer of the appropriate
// cluster. The expression is used while chaining rows into the hash
// table. The result of the expression is boolean.
ex_expr *rightSearchExpr = 0;
// the leftHashExpr takes a row from the left child and calculates
// the hash value. After the expression is evaluated, the hash value is
// available in the hashValueTupp_ of the hash join tcb.
ex_expr * leftHashExpr = 0;
// the leftMoveExpr moves the incoming left row directly to the parent's
// buffer. This happens during phase 2 of the hash join if the hash table
// is immediately probed with the left row. If the row qualifies, there
// is no need to move it to an "extended" left row first.
ex_expr * leftMoveExpr = 0;
// the leftMoveInExpr moves the incoming left row into a contiguous
// buffer. The row in the buffer is also extended by a row header to
// store the hash value and a hash chain pointer.
ex_expr * leftMoveInExpr = 0;
// the leftMoveOutExpr is used to move an "extended" left row to the
// parent's Atp if the row is part of the result. The expression gets
// rid of the row header. Again, this expression is not really required.
// The same arguments as for the rightMoveOutExpr holds. For the
// leftMoveOutExpr it is even easier, because there is no additional
// data (hash expression). Again, for now the expression is generated but
// ignored.
ex_expr * leftMoveOutExpr = 0;
// the probeSearchExpr1 compares an incoming left row with an "extended"
// right row. This expression is used if the hash table is probed right
// away with a left row without moving the left row into a contiguous
// buffer. This happens during phase 2 of the hash join. The result of the
// expression is boolean.
ex_expr * probeSearchExpr1 = 0;
// the probeSearchExpr2 compares an "extended" left row with an "extended"
// right row. The result of the expression is boolean. This expression is
// used during phase 3 of the hash join.
ex_expr * probeSearchExpr2 = 0;
// the leftJoinExpr is used in some left join cases, where we have to
// null instantiate a row.
ex_expr * leftJoinExpr = 0;
// the nullInstForLeftJoinExpr puts null values into the null
// instantiated row. The instantiated row is a right row without
// a row header.
ex_expr * nullInstForLeftJoinExpr = 0;
// the rightJoinExpr is used in some right join cases, where we have to
// null instantiate a row from the left.
ex_expr * rightJoinExpr = 0;
// the nullInstForRightJoinExpr puts null values into the null instantiated
// row. The instantiated row is a left row without a row header.
ex_expr * nullInstForRightJoinExpr = 0;
// the beforeJoinPred1 compares a row from the left child with an
// "extended" right row. It is used during phase 2 of the hash join.
// The result is boolean.
ex_expr * beforeJoinPred1 = 0;
// the beforeJoinPred2 compares an "extended" left row with an
// "extended" right row. It is used during phase 3 of the hash join.
// The result is boolean.
ex_expr * beforeJoinPred2 = 0;
// the afterJoinPred1 compares a row from the left child with an
// "extended" right row. It is used during phase 2 of workProbe,
// when there is a matching row. Used when there is no overflow,
// and the left row is a composite row.
// The result is boolean.
ex_expr * afterJoinPred1 = 0;
// the afterJoinPred2 compares an "extended" left row with an
// "extended" right row. It is used during phase 3 of workProbe,
// when there is a matching row. This is used after an overflow,
// when the left row comes from a hash buffer.
// The result is boolean.
ex_expr * afterJoinPred2 = 0;
// variation of afterJoinPred1, used when the "extended" right row has to
// be the NULL instantiated part in a left join. Compares a row from the
// left child with an "NullInstantiated" null tuple representing a right row.
// Used during phase 2 of workProbe when there is no matching right row.
// Used when there is no overflow, and the left row is a composite row.
// The result is boolean.
ex_expr * afterJoinPred3 = 0;
// variation of afterJoinPred2, used when the "extended" right row has to
// be the NULL instantiated part in a left join. Compares an "extended" left
// row with an "NullInstantiated" null tuple representing a right row.
// Used during phase 3 of workProbe when there is no matching right row.
// This is used after an overflow, when the left row comes from a hash buffer.
// The result is boolean.
ex_expr * afterJoinPred4 = 0;
// the afterJoinPred5 compares a "NullInstantiated" null tuple representing a
// left row with a "NullInstantiated" right row. It is used during workReturnRightRows
// to process the rows from the right which did not have a matching left row.
// The result is boolean.
ex_expr * afterJoinPred5 = 0;
const ValueIdSet &rightOutputCols =
child(1)->getGroupAttr()->getCharacteristicOutputs();
// right table columns to be inserted into the return buffer
// (if not semi join)
ValueIdList rightOutputValIds;
// right table columns to be inserted into the hash buffer. For
// "normal" cases these columns are identical to rightOutputValIds.
// However, if the join predicate involves expressions, we also
// move these expressions into the hash buffer
ValueIdList rightBufferValIds;
ValueId valId;
// add only those ValueIds to rightOutputValIds and rightBufferValIds
// which are not part of the input.
for (valId = rightOutputCols.init();
rightOutputCols.next(valId);
rightOutputCols.advance(valId))
if (NOT getGroupAttr()->getCharacteristicInputs().contains(valId)) {
rightOutputValIds.insert(valId);
rightBufferValIds.insert(valId);
};
// left table columns to be inserted into the hash buffer and the
// return buffer
const ValueIdSet &leftOutputCols =
child(0)->getGroupAttr()->getCharacteristicOutputs();
ValueIdList leftOutputValIds;
// UniqueHashJoin does not MOVE the left values.
// It simply passes them to the parent queue using copyAtp()
// So do not build the list of left outputs.
//
if (!useUniqueHashJoin) {
// add only those ValueIds to leftOutputValIds
// which are not part of the input.
for (valId = leftOutputCols.init();
leftOutputCols.next(valId);
leftOutputCols.advance(valId))
if (NOT getGroupAttr()->getCharacteristicInputs().contains(valId))
leftOutputValIds.insert(valId);
}
// allocate 2 map tables, so that we can later remove the right child MT
MapTable * myMapTable0 = generator->appendAtEnd();
MapTable * myMapTable1 = generator->appendAtEnd();
ex_cri_desc * givenDesc = generator->getCriDesc(Generator::DOWN);
// Incoming records will be divided equally (sans data skew) among the ESPs
// so each HJ instance will handle only its share (i.e. divide by #esps)
Lng32 saveNumEsps = generator->getNumESPs();
////////////////////////////////////////////////////////////////////////////
// generate the right child
////////////////////////////////////////////////////////////////////////////
generator->setCriDesc(givenDesc, Generator::DOWN);
child(1)->codeGen(generator);
ComTdb * rightChildTdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *rightExplainTuple = generator->getExplainTuple();
// This value was originally set inside generator by my parent exchange node
// as a global variable. Now need to reset the saveNumEsps value back into
// generator since codegen of child(1) may have changed it.
generator->setNumESPs(saveNumEsps);
// A MapTable for the Min and Max values used by the min/max
// optimization.
MapTable *myMapTableMM = NULL;
// Normally the left down request is the same as the parent request
// (givenDesc). But if this HashJoin is doing the min max
// optimization, then the left down request will contain an extra
// tuple for the min and max values. In this case, we will create a
// new criDesc for the left down request
ex_cri_desc * leftDownDesc = givenDesc;
// The length of the min max tuple used by the min/max optimization.
ULng32 minMaxRowLength = 0;
// If we are doing min/max optimization, get the min/max values into
// the map table and mark them as codegen'ed before we codegen the left
// child. These are just the placeholders for the computed min/max
// values. Later we will map the actual min/max values to the
// same location in the min/max tuple.
if(getMinMaxVals().entries())
{
// This map table is a placeholder, used so we can unlink the
// min/max mapTable (myMapTableMM)
MapTable * myMapTableMM1 = generator->appendAtEnd();
// Add the map table for the min and max values.
myMapTableMM = generator->appendAtEnd();
// Allocate a new leftDownDesc which will have one additional
// tuple for the min/max values
leftDownDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
// The index of the min/max tuple in the down request.
short minMaxValsDownAtpIndex = leftDownDesc->noTuples()-1;
// Layout the min/max values in the min/max tuple and add
// corresponding entries into the min/max mapTable
// (myMapTableMM).
expGen->processValIdList(getMinMaxVals(),
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
minMaxRowLength, 0, minMaxValsDownAtpIndex);
// Mark these as codegen'ed so the scans that use these will not
// attempt to generate code for them.
generator->getMapInfo(getMinMaxVals()[0])->codeGenerated();
generator->getMapInfo(getMinMaxVals()[1])->codeGenerated();
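// Detach the min/max map table from the main chain for now; it is
// re-appended below, just before the left child is generated.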
generator->unlinkNext(myMapTableMM1);
}
// before we generate the left child, we have to remove the map table of
// the right child, because the left child should not see its siblings
// valueIds (especially since we sometimes see duplicate valueIds due
// to VEGs). Because of these duplicates we have to make sure that
// the order of valueIds in the map table is right child valueIds first, then
// left child valueIds. To make a long story short, we first generate
// the right child, unchain the map table of the right child, generate
// the left child and then chain the right child's map table in front
// of the left child's map table. The map table interface is not well
// suited for these kinds of chain/unchain operations. This is why we
// see so many empty helper map tables in this method here. There are
// two other possible solutions to this problem:
// 1 - extend the map table interface to make these manipulations easier
// 2 - make changes in the normalizer/VEG code to avoid duplicate
// valueIds.
// For now we have all these extra map tables!
// ok, here we go: unchain right child map table
generator->unlinkNext(myMapTable0);
// Add the min/max maptable so the left child scans that use them will
// have access to them.
if(myMapTableMM)
generator->appendAtEnd(myMapTableMM);
// allocate 2 map tables, so that we can later remove the left child MT
MapTable * myMapTable2 = generator->appendAtEnd();
MapTable * myMapTable3 = generator->appendAtEnd();
////////////////////////////////////////////////////////////////////////////
// generate code for left child
////////////////////////////////////////////////////////////////////////////
generator->setCriDesc(leftDownDesc, Generator::DOWN);
child(0)->codeGen(generator);
ComTdb * leftChildTdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *leftExplainTuple = generator->getExplainTuple();
ex_cri_desc * leftChildDesc = generator->getCriDesc(Generator::UP);
// This value was originally set inside generator by my parent exchange node
// as a global variable. Now need to reset the saveNumEsps value back into
// generator since codegen of my child(0) may have changed it.
generator->setNumESPs(saveNumEsps);
short returnedTuples;
short returnedLeftRowAtpIndex = -1;
short returnedRightRowAtpIndex = -1;
short returnedInstRowAtpIndex = -1;
short returnedInstRowForRightJoinAtpIndex = -1;
// The work atp is passed as the second atp to the expression evaluation
// procs. Hence, its atp value is set to 1.
short workAtpPos = 1; // second atp
short leftRowAtpIndex = -1;
short extLeftRowAtpIndex = -1;
short rightRowAtpIndex = -1;
short extRightRowAtpIndex1 = -1;
short extRightRowAtpIndex2 = -1;
short hashValueAtpIndex = -1;
// The atpIndex of the min/max tuple in the workAtp.
short minMaxValsAtpIndex = -1;
short numOfATPIndexes = -1;
short instRowForLeftJoinAtpIndex = -1;
short instRowForRightJoinAtpIndex = -1;
short prevInputTuppIndex = -1;
// The Unique Hash Join has a different layout for the returned row
// and for the work Atp.
//
if(useUniqueHashJoin) {
////////////////////////////////////////////////////////////////////////////
// Unique Hash Join
// Layout of row returned by this node.
//
// |---------------------------------------------------
// | input data | left child's data | hash table row |
// | | | |
// | | | |
// | ( I tupps ) | ( L tupps ) | ( 0/1 tupp ) |
// |---------------------------------------------------
//
// input data: the atp input to this node by its parent.
// left child data: the tupps from the left row. Only if the left row
// is required.
// hash table row: the row from the hash table (right child). In case of
// a semi join, this row is not returned
//
////////////////////////////////////////////////////////////////////////////
returnedTuples = leftChildDesc->noTuples();
// Right row goes in last entry.
if ( rightOutputNeeded && rightOutputValIds.entries() > 0) {
returnedRightRowAtpIndex = returnedTuples++;
}
/////////////////////////////////////////////////////////////////////////
// Unique Hash Join Layout of WorkATP
// in all the computation below, the hashed row value ids are available
// in a temporary work atp. They are used from there to build the
// hash table.
// Before returning from this proc, these value ids are moved to the
// map table that is being returned. The work atp contains the following
// entries:
// index what
// -------------------------------------------------------
// 0 constants
// 1 temporaries
// 2 a row from the left child (not used)
// 3 an "extended" row from the right child
// 4 another "extended" row from the right child
// 5 the calculated hash value
// 6 the previous input (in case of HT reuse)
/////////////////////////////////////////////////////////////////////////
// not used by unique hash join, but needed for some common code
extLeftRowAtpIndex = 2;
// not used by unique hash join, but used when generating
// rightMoveOutExpr. unique hash join does not use this
// but needs the maptable generated as a by product.
rightRowAtpIndex = 2;
extRightRowAtpIndex1 = 3;
extRightRowAtpIndex2 = 4;
hashValueAtpIndex = 5;
numOfATPIndexes = 6;
if(isReuse())
prevInputTuppIndex = numOfATPIndexes++;
// If using min/max optimization, add one more tuple for min/max values.
if(getMinMaxVals().entries())
minMaxValsAtpIndex = numOfATPIndexes++;
} else {
////////////////////////////////////////////////////////////////////////////
// Regular Hybrid Hash Join
// Layout of row returned by this node.
//
// |---------------------------------------------------------------------|
// | input data | left child's data | hash table row | instantiated for |
// | | | | right row (left |
// | | | | join) |<|
// | ( I tupps ) | ( 0/1 tupp ) | ( 0/1 tupp ) | ( 0/1 tupp ) | |
// |---------------------------------------------------------------------| |
// |
// |-------------------------------------|
// | ------------------------
// | | instantiated for |
// |--->| left row (right join) |
// | (0/1 tupp) |
// -------------------------
//
// input data: the atp input to this node by its parent.
// left child data: a tupp with the left row. Only if the left row
// is required. I.e., in certain cases the left row is
// returned.
// hash table row: the row from the hash table (right child). In case of
// a semi join, this row is not returned
// instantiated for
// right row: For some left join cases, the hash table rows or
// the null values are instantiated. See proc
// Join::instantiateValuesForLeftJoin for details at
// the end of this file.
// instantiated for
// left row: For some right join cases, the hash table rows or
// the null values are instantiated. See proc
// Join::instantiateValuesForRightJoin for details at
// the end of this file.
//
// Returned row to parent contains:
//
// I + 1 tupp, if the left row is not returned
// I + 1 tupp, if this is a semi join. Rows from right are not returned.
//
// If this is not a semi join, then:
// I + 2 tupps, if instantiation is not done.
// I + 3 tupps, if instantiation is done only for Left Outer Join.
// I + 4 tupps, if instantiation is done only for Full Outer Join.
//
////////////////////////////////////////////////////////////////////////////
#pragma nowarn(1506) // warning elimination
returnedTuples = givenDesc->noTuples();
#pragma warn(1506) // warning elimination
if (leftOutputValIds.entries())
returnedLeftRowAtpIndex = returnedTuples++;
if ( rightOutputNeeded ) {
returnedRightRowAtpIndex = returnedTuples++;
if (nullInstantiatedOutput().entries() > 0)
returnedInstRowAtpIndex = returnedTuples++;
}
if (nullInstantiatedForRightJoinOutput().entries() > 0)
returnedInstRowForRightJoinAtpIndex = returnedTuples++;
/////////////////////////////////////////////////////////////////////////
// Regular Hybrid Hash Join Layout of WorkATP
// in all the computation below, the hashed row value ids are available
// in a temporary work atp. They are used from there to build the
// hash table, apply the after predicate, join predicate etc.
// Before returning from this proc, these value ids are moved to the
// map table that is being returned. The work atp contains the following
// entries:
// index what
// -------------------------------------------------------
// 0 constants
// 1 temporaries
// 2 a row from the left child
// 3 an "extended" row from the left child
// 4 a row from the right child
// 5 an "extended" row from the right child
// 6 another "extended" row from the right child
// 7 the calculated hash value
// 8 the instantiated right row for left join
// 9 the instantiated left row for right join
// 10/11/12 the previous input (in case of HT reuse)
/////////////////////////////////////////////////////////////////////////
leftRowAtpIndex = 2;
extLeftRowAtpIndex = 3;
rightRowAtpIndex = 4;
extRightRowAtpIndex1 = 5;
extRightRowAtpIndex2 = 6;
hashValueAtpIndex = 7;
numOfATPIndexes = 8;
if (nullInstantiatedOutput().entries() > 0)
instRowForLeftJoinAtpIndex = numOfATPIndexes++;
if (nullInstantiatedForRightJoinOutput().entries() > 0)
instRowForRightJoinAtpIndex = numOfATPIndexes++;
if(isReuse())
prevInputTuppIndex = numOfATPIndexes++;
// If using min/max optimization, add one more tuple for min/max values.
if(getMinMaxVals().entries())
minMaxValsAtpIndex = numOfATPIndexes++;
}
// If this HashJoin is doing min/max optimization, then generate the
// aggregate expressions to compute the Min and Max values.
if(getMinMaxVals().entries()) {
// Link in the map table for the right child values.
MapTable *lastMap = generator->getLastMapTable();
generator->appendAtEnd(myMapTable1);
// A List to contain the Min and Max aggregates
ValueIdList mmPairs;
for (CollIndex mmCol = 0; mmCol < getMinMaxCols().entries(); mmCol++) {
// Cast the value coming from the right child to the common type
// used to compute the Min/Max values.
//
ItemExpr *rightCol = getMinMaxCols()[mmCol].getItemExpr();
rightCol = new(generator->wHeap())
Cast(rightCol, getMinMaxVals()[(mmCol*2)].getType().newCopy(generator->wHeap()));
// The min and max aggregates
ItemExpr *minVal = new(generator->wHeap()) Aggregate(ITM_MIN, rightCol, FALSE);
ItemExpr *maxVal = new(generator->wHeap()) Aggregate(ITM_MAX, rightCol, FALSE);
minVal->bindNode(generator->getBindWA());
maxVal->bindNode(generator->getBindWA());
ValueId minId = minVal->getValueId();
ValueId maxId = maxVal->getValueId();
Attributes * mapAttr;
// Map the min aggregate to be the same as the min placeholder
// (system generated hostvar). Set the ATP/ATPIndex to refer to
// the min/max tuple in the workAtp
mapAttr = (generator->getMapInfo(getMinMaxVals()[(mmCol*2)]))->getAttr();
mapAttr = (generator->addMapInfo(minId, mapAttr))->getAttr();
mapAttr->setAtp(workAtpPos);
mapAttr->setAtpIndex(minMaxValsAtpIndex);
// Map the max aggregate to be the same as the max placeholder
// (system generated hostvar). Set the ATP/ATPIndex to refer to
// the min/max tuple in the workAtp
mapAttr = (generator->getMapInfo(getMinMaxVals()[((mmCol*2)+1)]))->getAttr();
mapAttr = (generator->addMapInfo(maxId, mapAttr))->getAttr();
mapAttr->setAtp(workAtpPos);
mapAttr->setAtpIndex(minMaxValsAtpIndex);
// Insert into list
mmPairs.insert(minId);
mmPairs.insert(maxId);
}
// Generate the min/max expression.
expGen->generateAggrExpr(mmPairs, ex_expr::exp_AGGR, &minMaxExpr,
0, true);
// No longer need the right child map table.
generator->unlinkNext(lastMap);
}
ex_cri_desc * returnedDesc = new(space) ex_cri_desc(returnedTuples, space);
// now the unchain/chain business described above. First, unchain the
// left child map table
generator->unlinkNext(myMapTable0);
// add the right child map table
generator->appendAtEnd(myMapTable1);
// and finally add the left child map table
generator->appendAtEnd(myMapTable2);
// Here is what the map table list looks like now:
// MT -> MT0 -> MT1 -> RC MT -> MT2 -> MT3 -> LC MT
ex_cri_desc * workCriDesc = new(space) ex_cri_desc(numOfATPIndexes, space);
// This value was originally set inside generator by my parent exchange node
// as a global variable. Now need to reset the saveNumEsps value back into
// generator since codegen of my child(0) may have changed it.
generator->setNumESPs(saveNumEsps);
// make sure the expressions that are inserted into the hash table
// include the expression(s) to be hashed (this restriction may be
// lifted some day in the future, but in all "normal" cases this happens
// anyway)
LIST(CollIndex) hashKeyColumns(generator->wHeap());
////////////////////////////////////////////////////////////
// Before generating any expression for this node, set
// the expression generation flag not to generate float
// validation PCode. This is to speed up PCode evaluation
////////////////////////////////////////////////////////////
generator->setGenNoFloatValidatePCode(TRUE);
////////////////////////////////////////////////////////////
// Generate the hash computation expression for the left
// and right children.
//
// The hash value is computed as:
// hash_value = Hash(col1) + Hash(col2) .... + Hash(colN)
////////////////////////////////////////////////////////////
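// (The individual Hash(col) values are combined with HashComb nodes below;
// e.g., for ON t1.a = t2.b AND t1.c = t2.d, the build side hashes (b, d) and
// the probe side hashes (a, c), so matching rows produce equal hash values.)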
ItemExpr * buildHashTree = NULL;
ItemExpr * probeHashTree = NULL;
// construct the rightHashExpr and leftHashExpr if
// it's a full outer join and the right rows
// have to be returned as well. During the probe
// for every left row, we mark the right row
// that matches. At the end (after returning all left rows for left joins)
// we go through all the clusters and null instantiate the left row
// for every right row that is not marked. For this reason, we need
// the left and right rows into the cluster. For getting the row into a cluster
// we need the hashValue and hence rightHashExpr and leftHashExpr.
// TBD Hema
// ||
// (nullInstantiatedForRightJoinOutput().entries() > 0))
if (! getEquiJoinPredicates().isEmpty()) {
///////////////////////////////////////////////////////////////////////
// getEquiJoinPredicates() is a set containing predicates of the form:
// <left value1> '=' <right value1>, <left value2> '=' <right value2> ..
//
// The right values are the hash table key values
///////////////////////////////////////////////////////////////////////
for (valId = getEquiJoinPredicates().init();
getEquiJoinPredicates().next(valId);
getEquiJoinPredicates().advance(valId)) {
ItemExpr * itemExpr = valId.getItemExpr();
// call the pre-code generator to make sure we handle cases of
// more difficult type conversion, such as complex types.
itemExpr->preCodeGen(generator);
// get the left (probing) and the right (building) column value
// to be hashed and convert them to a common data type
ItemExpr * buildHashCol = itemExpr->child(1);
ItemExpr * probeHashCol = itemExpr->child(0);
// Search for the expression to be hashed in the build table and
// make a lookup table that links hash key columns to columns in
// the hash table. For now, if the expression to be hashed is not
// yet part of the hash table, then add it. This should only happen
// for join predicates involving expressions, e.g. t1.a = t2.b + 5.
// The lookup table and the list rightBufferValIds is later used when
// moving the building tuple into the hash table and for
// generating the build search expression.
NABoolean found = FALSE;
ValueId buildHashColValId = buildHashCol->getValueId();
for (CollIndex ix = 0; ix < rightBufferValIds.entries() AND NOT found; ix++) {
if (rightBufferValIds[ix] == buildHashColValId) {
// found the build hash column in the column layout of the
// hash table, remember that hash key column
// "hashKeyColumns.entries()" can be found in column "ix" of
// the hash table.
hashKeyColumns.insert(ix);
found = TRUE;
}
}
if (NOT found) {
// hash value is not yet contained in hash table, add it and
// remember that it is stored in column "rightBufferValIds.entries()"
hashKeyColumns.insert(rightBufferValIds.entries());
rightBufferValIds.insert(buildHashColValId);
}
// now finish the job by adding type conversion to a common type
// even for cases that use specialized comparison operators for
// slightly mismatching types (e.g. the above procedure will not
// create a type conversion operator for a comparison between a
// 16 bit integer and a 32 bit integer)
const NAType & buildType = buildHashCol->getValueId().getType();
const NAType & probeType = probeHashCol->getValueId().getType();
if (NOT (buildType == probeType)) {
// seems like the executor would use a special-purpose
// comparison operator between two different data types,
// but this isn't possible for this case where we hash
// both sides separately
// find the common datatype that fits both columns
UInt32 flags =
((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON)
? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0);
const NAType *resultType = buildType.synthesizeType(
SYNTH_RULE_UNION,
buildType,
probeType,
generator->wHeap(),
&flags);
CMPASSERT(resultType);
// matchScales() has been called in preCodeGen()
// add type conversion operators if necessary
if (NOT (buildType == *resultType)) {
buildHashCol = new(generator->wHeap()) Cast(buildHashCol,resultType);
}
if (NOT (probeType == *resultType)) {
probeHashCol = new(generator->wHeap()) Cast(probeHashCol,resultType);
}
}
NABoolean doCasesensitiveHash = FALSE;
if (buildType.getTypeQualifier() == NA_CHARACTER_TYPE &&
probeType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &cBuildType = (CharType&)buildType;
const CharType &cProbeType = (CharType&)probeType;
if (cBuildType.isCaseinsensitive() != cProbeType.isCaseinsensitive())
doCasesensitiveHash = TRUE;
}
// make the hash function for the right value, to be hashed
// into the hash table while building the hash table.
buildHashCol->bindNode(generator->getBindWA());
BuiltinFunction * hashFunction =
new(generator->wHeap()) Hash(buildHashCol);
if (buildHashTree) {
buildHashTree = new(generator->wHeap()) HashComb(buildHashTree,
hashFunction);
}
else {
buildHashTree = hashFunction;
}
if (doCasesensitiveHash)
{
((Hash*)hashFunction)->setCasesensitiveHash(TRUE);
((HashComb*)buildHashTree)->setCasesensitiveHash(TRUE);
}
// make the hash function for the left value. This value is to
// be hashed into the hash table while probing the hash table.
hashFunction = new(generator->wHeap()) Hash(probeHashCol);
if (probeHashTree) {
probeHashTree = new(generator->wHeap()) HashComb(probeHashTree,
hashFunction);
}
else {
probeHashTree = hashFunction;
}
if (doCasesensitiveHash)
{
((Hash*)hashFunction)->setCasesensitiveHash(TRUE);
((HashComb*)probeHashTree)->setCasesensitiveHash(TRUE);
}
}
// hash value is an unsigned long. (compare typedef SimpleHashValue in
// common/BaseType.h). It could be made a bigger datatype,
// if need be.
buildHashTree = new (generator->wHeap())
Cast(buildHashTree, new (generator->wHeap()) SQLInt(generator->wHeap(), FALSE, FALSE));
probeHashTree = new (generator->wHeap())
Cast(probeHashTree, new (generator->wHeap()) SQLInt(generator->wHeap(), FALSE, FALSE));
buildHashTree->setConstFoldingDisabled(TRUE);
probeHashTree->setConstFoldingDisabled(TRUE);
// bind/type propagate the hash evaluation tree
buildHashTree->bindNode(generator->getBindWA());
probeHashTree->bindNode(generator->getBindWA());
// add the build root value id to the map table. This is the hash value.
Attributes * mapAttr;
mapAttr = (generator->addMapInfo(buildHashTree->getValueId(), 0))->
getAttr();
mapAttr->setAtp(workAtpPos);
mapAttr->setAtpIndex(hashValueAtpIndex);
ULng32 len;
ExpTupleDesc::computeOffsets(mapAttr,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT, len);
// add the probe root value id to the map table. This is the hash value.
mapAttr = (generator->addMapInfo(probeHashTree->getValueId(), 0))->
getAttr();
mapAttr->copyLocationAttrs(generator->
getMapInfo(buildHashTree->
getValueId())->getAttr());
// generate code to evaluate the hash expression
expGen->generateArithExpr(buildHashTree->getValueId(),
ex_expr::exp_ARITH_EXPR,
&rightHashExpr);
// generate the probe hash expression
expGen->generateArithExpr(probeHashTree->getValueId(),
ex_expr::exp_ARITH_EXPR,
&leftHashExpr);
};
// only the case of single column in the NOT IN is handled.
// these 2 expressions (checkInnerNullExpr_ and checkOuterNullExpr_)
// will be empty for the multi-column case
// generate the check inner null expression
ex_expr *checkInnerNullExpression = 0;
if (!(getCheckInnerNullExpr().isEmpty()))
{
ItemExpr * newExprTree = getCheckInnerNullExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newExprTree->getValueId(),ex_expr::exp_SCAN_PRED,
&checkInnerNullExpression);
}
// generate the check outer null expression
ex_expr *checkOuterNullExpression = 0;
if (!(getCheckOuterNullExpr().isEmpty()))
{
ItemExpr * newExprTree = getCheckOuterNullExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newExprTree->getValueId(),ex_expr::exp_SCAN_PRED,
&checkOuterNullExpression);
}
// allocate two map tables, so that we can later remove the left child's map
// table
MapTable * myMapTable4 = generator->appendAtEnd();
MapTable * myMapTable5 = generator->appendAtEnd();
// MT -> MT0 -> MT1 -> RC MT -> MT2 -> MT3 -> LC MT -> MT4 -> MT5
// determine the tuple format and whether we want to resize rows or not
// base the decision on the right side and left side
NABoolean bmo_affinity = (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO_AFFINITY) == DF_ON);
NABoolean resizeCifRecord = FALSE;
ExpTupleDesc::TupleDataFormat tupleFormat ;
NABoolean considerBufferDefrag = FALSE;
if (! bmo_affinity &&
getCachedTupleFormat() != ExpTupleDesc::UNINITIALIZED_FORMAT &&
CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM &&
CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_SYSTEM)
{
resizeCifRecord = getCachedResizeCIFRecord();
tupleFormat = getCachedTupleFormat();
considerBufferDefrag = getCachedDefrag() && resizeCifRecord;
}
else
{
tupleFormat = determineInternalFormat( rightBufferValIds,
leftOutputValIds,
this,
resizeCifRecord,
generator,
bmo_affinity,
considerBufferDefrag,
useUniqueHashJoin);
considerBufferDefrag = considerBufferDefrag && resizeCifRecord;
}
// generate the rightMoveInExpr
ValueIdList rightMoveTargets1;
MapTable *rightExtValMapTable = NULL;
ULng32 rightRowLength = 0;
if (rightBufferValIds.entries() > 0) {
// Offsets are based on the row starting after the HashRow structure.
expGen->generateContiguousMoveExpr(rightBufferValIds,
-1, // add convert nodes
workAtpPos,
extRightRowAtpIndex1,
tupleFormat,
rightRowLength,
&rightMoveInExpr,
0, // tuple descriptor
ExpTupleDesc::SHORT_FORMAT,
&rightExtValMapTable,
&rightMoveTargets1);
};
ULng32 extRightRowLength = rightRowLength + sizeof(HashRow);
// MT -> MT0 -> MT1 -> RC MT -> MT2 -> MT3 -> LC MT
// -> MT4 -> MT5 -> ERC (new valIds)
// remove the map table from the right child. We don't need it anymore
// first un-chain the left child's map table
generator->unlinkNext(myMapTable2);
// now delete the right child's map table
// This will delete MT1 -> RC MT and MT2, so we cannot reference those
// any more..
generator->removeAll(myMapTable0);
// make sure we cannot reference them anymore..
myMapTable1 = NULL;
myMapTable2 = NULL;
// and append the left child's map table again
generator->appendAtEnd(myMapTable3);
// MT -> MT0 -> MT3 -> LC MT -> MT4 -> MT5 -> ERC (new valIds)
// generate leftMoveExpr to move a left child row directly to the
// parent's buffer
ULng32 leftRowLength = 0;
if (leftOutputValIds.entries() > 0) {
expGen->generateContiguousMoveExpr(leftOutputValIds,
-1,
workAtpPos,
leftRowAtpIndex,
tupleFormat,
leftRowLength,
&leftMoveExpr);
// get rid of the map table which was just appended by the last call
generator->removeLast();
};
// MT -> MT0 -> MT3 -> LC MT -> MT4 -> MT5 -> ERC (new valIds)
// generate the leftMoveInExpr
ValueIdList leftMoveTargets;
MapTable *leftExtValMapTable = NULL;
if (leftOutputValIds.entries() > 0) {
// Offsets are based on the row starting after the HashRow structure.
expGen->generateContiguousMoveExpr(leftOutputValIds,
-1, // add convert nodes
workAtpPos,
extLeftRowAtpIndex,
tupleFormat,
leftRowLength,
&leftMoveInExpr,
0,
ExpTupleDesc::SHORT_FORMAT,
&leftExtValMapTable,
&leftMoveTargets);
};
ULng32 extLeftRowLength = leftRowLength + sizeof(HashRow);
// MT -> MT0 -> MT3 -> LC MT -> MT4 -> MT5
// -> ERC (new valIds) -> ELC (new valIds)
// add the map table of the "extended" right row
if(rightExtValMapTable) {
generator->appendAtEnd(rightExtValMapTable);
}
// MT -> MT0 -> MT3 -> LC MT -> MT4 -> MT5
// -> ERC (new valIds) -> ELC (new valIds) -> ERC (old Ids)
// generate probeSearchExpr1
if (! getEquiJoinPredicates().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = getEquiJoinPredicates().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&probeSearchExpr1);
}
// generate beforeJoinPred1
if (! joinPred().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = joinPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&beforeJoinPred1);
}
// generate afterJoinPred1
if (! selectionPred().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE, TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&afterJoinPred1);
}
// MT -> MT0 -> MT3 -> LC MT -> MT4 -> MT5
// -> ERC (new valIds) -> ELC (new valIds) -> ERC (old Ids)
// remove MapTable for left child row. First un-chain the extended
// right row tupps
generator->unlinkNext(myMapTable4);
// now we can safely delete the MapTable for the left child row
generator->unlinkNext(myMapTable0);
// add the MapTable for the "extended" right row again
generator->appendAtEnd(myMapTable5);
// For unique Hash Join, there is no leftExtValMapTable.
//
if(!useUniqueHashJoin) {
// add the MapTable for the "extended" left row
// if it exists. It will not exist for Unique Hash Join
generator->appendAtEnd(leftExtValMapTable);
}
// MT -> MT0 -> MT5 -> ERC (new valIds) -> ELC (new valIds)
// -> ERC (old Ids) -> ELC (old Ids)
if (! getEquiJoinPredicates().isEmpty()) {
// Generate rightSearchExpr for the build table. This expression
// compares two "extended" right rows. The MapTable contains
// only one of these rows (extRightRowAtpIndex1). To get the
// MapTable and ValueIdList for the second row, we go thru the
// rightBufferValIds and create new itemexpressions from this list.
ValueIdList rightMoveTargets2;
CollIndex i = 0;
for (i = 0; i < rightBufferValIds.entries(); i++) {
// create the new item expression
ItemExpr * newCol = new(generator->wHeap())
NATypeToItem((NAType *)&(rightBufferValIds[i].getType()));
newCol->synthTypeAndValueId();
// copy the attributes from the first extended row
Attributes * originalAttr =
generator->getMapInfo(rightMoveTargets1[i])->getAttr();
Attributes * newAttr =
generator->addMapInfo(newCol->getValueId(), 0)->getAttr();
newAttr->copyLocationAttrs(originalAttr);
// only atpindex is different
newAttr->setAtpIndex(extRightRowAtpIndex2);
// add the new valueId to the list of movetargets
rightMoveTargets2.insert(newCol->getValueId());
};
// MT -> MT0 -> MT5 -> ERC (new valIds) -> ELC (new valIds)
// -> ERC (old Ids) -> ELC (old Ids) -> ERC2 (new valIds)
ValueIdSet searchExpr;
for (i = 0; i < hashKeyColumns.entries(); i++) {
// hashKeyColumns[i] remembers which column in the hash table
// the i-th hash key column is.
CollIndex hashColNum = hashKeyColumns[i];
ItemExpr *eqNode =
new(generator->wHeap()) BiRelat(ITM_EQUAL,
rightMoveTargets1[hashColNum].getItemExpr(),
rightMoveTargets2[hashColNum].getItemExpr(),
// specialNulls == TRUE means that the right search
// expression would treat NULL values as identical (when
// a new row is inserted into the hash-table); hence such
// rows would be chained.
// Note: NULL values in the hash table are treated as
// non-identical by the probe search expressions (when
// probing with left rows); i.e., the above chain of right
// rows with NULLs would never be matched!
TRUE);
eqNode->bindNode(generator->getBindWA());
// collect all the comparison preds in a value id set
searchExpr += eqNode->getValueId();
}
// AND the individual parts of the search expression together and
// generate the expression (note that code for the build table columns
// and for the move targets has been generated before, only the
// comparison code itself should be generated here)
ItemExpr * newPredTree = searchExpr.rebuildExprTree(ITM_AND,TRUE,TRUE);
newPredTree->bindNode(generator->getBindWA());
expGen->generateExpr(newPredTree->getValueId(),
ex_expr::exp_SCAN_PRED,
&rightSearchExpr);
}
// The Unique Hash Join does not use the probeSearchExpr2 since it
// does not support Overflow
//
if(!useUniqueHashJoin) {
// generate probeSearchExpr2
if (! getEquiJoinPredicates().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = getEquiJoinPredicates().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&probeSearchExpr2);
}
}
// generate beforeJoinPred2
if (! joinPred().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = joinPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&beforeJoinPred2);
}
// generate afterJoinPred2
if (! selectionPred().isEmpty()) {
ItemExpr * newPredTree;
newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE, TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&afterJoinPred2);
}
// generate the rightMoveOutExpr
rightRowLength = 0;
MapTable * rightValMapTable = NULL;
Int32 bulkMoveOffset = -2; // try to generate a bulk move
if ( rightOutputValIds.entries() > 0 && rightOutputNeeded ) {
ValueIdList *rmo;
if(tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) {
// For aligned rows, we cannot return just the prefix of the row.
// This is because the columns in the row may have been rearranged and
// the values may not all be at fixed offsets.
//
// So for aligned rows, return the whole right buffer.
rmo = &rightBufferValIds;
} else {
// For exploded rows, we can return just the prefix.
// So, return just the values that are needed above.
rmo = &rightOutputValIds;
}
expGen->generateContiguousMoveExpr(*rmo,
-1, // add convert nodes
workAtpPos,
rightRowAtpIndex,
tupleFormat,
rightRowLength,
&rightMoveOutExpr,
0,
ExpTupleDesc::SHORT_FORMAT,
&rightValMapTable,
NULL, // target Value Id List
0, // start offset
&bulkMoveOffset);
}
// generate the leftMoveOutExpr
MapTable * leftValMapTable = NULL;
if (leftOutputValIds.entries() > 0) {
expGen->generateContiguousMoveExpr(leftOutputValIds,
-1, // add convert nodes
workAtpPos,
leftRowAtpIndex,
tupleFormat,
leftRowLength,
&leftMoveOutExpr,
0,
ExpTupleDesc::SHORT_FORMAT,
&leftValMapTable);
}
// remove the MapTables describing the extended rows
if (rightExtValMapTable) {
// Unlinks leftExtValMapTable
generator->unlinkNext(rightExtValMapTable);
} else {
// If we do not have a rightExtValMapTable, the code below will remove
// the leftExtValMapTable, and whatever follows, from the main maptable
// chain.
// If we don't do this, the removeAll() 4 lines below will delete the
// leftExtValMapTable (if it existed) and we need it later on..
if (leftExtValMapTable) {
// Note that leftExtValMapTable may now have ERC2 (new valIds) as a child
// at this point....
// If rightExtValMapTable existed, we are now on a separate chain...
generator->unlinkMe(leftExtValMapTable);
}
}
// At this point we have something like this..
// MT -> MT0 -> MT5 -> ERC (new valIds) -> ELC (new valIds)
// -> ERC (old Ids) -> ELC (old Ids) -> XX potentially more nodes here XX
// and everything from MT5 onwards will now be deleted!!
generator->removeAll(myMapTable0);
myMapTable5 = NULL; // make sure we cannot use the stale data anymore..
rightExtValMapTable = NULL; // make sure we cannot use the stale data anymore.
// Here is what the map table list looks like now:
// MT -> MT0
ULng32 instRowLength = 0;
ULng32 instRowForRightJoinLength = 0;
MapTable *instNullForLeftJoinMapTable = NULL;
// add MapTable for the right row
if ( rightOutputNeeded ) {
generator->appendAtEnd(rightValMapTable);
// Here is what the map table list looks like now:
// MT -> MT0 -> RV
// generate nullInstExpr. instantiateValuesForLeftJoin generates
// 2 expressions. The first one is to evaluate the right expression
// and move the result into the null instantiated row. The second one
// is to initialize the instantiated row with null values.
// instantiateValuesForLeftJoin also adds info for the instantiated
// null row to the MapTable.
if (nullInstantiatedOutput().entries() > 0) {
instantiateValuesForLeftJoin(generator,
workAtpPos, instRowForLeftJoinAtpIndex,
&leftJoinExpr, &nullInstForLeftJoinExpr,
&instRowLength,
&instNullForLeftJoinMapTable,
tupleFormat);
};
// Check point.
if (isLeftJoin() && !selectionPred().isEmpty())
{
MapTable * myMapTableX = generator->appendAtEnd();
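// myMapTableX acts as a checkpoint: map tables appended while generating
// the predicates below are discarded again via removeAll(myMapTableX),
// restoring the chain to this point.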
// Add back the left map table temporarily for generating the sel pred
// for Phase 2.
generator->appendAtEnd(myMapTable3);
// XXX -> MT3 -> LC MT -> MT4 -> MT5 -> ERC (new valIds)
// generate afterJoinPred3
ItemExpr * newPredTree;
newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&afterJoinPred3);
// myMapTable3 may be needed later on, unlink before we remove myMapTableX
// For those that might use myMapTable3 below, beware that the chain
// starting with myMapTable3 now may have additional map nodes appended
// as a result of the call to generateExpr() above, as compared to what
// looked like before when we unlinked myMapTable3 from the main chain.
// At this point the myMapTable3 chain will look something like this:
// MT3 -> LC MT -> MT4 -> MT5 -> ERC (new valIds) -> XX
// where XX represents whatever mapTables got added above
generator->unlinkMe(myMapTable3);
// This is how the check point is made use of.
generator->removeAll(myMapTableX);
// For Left Joins (including full joins), generate afterJoinPred4
//
{
// Add back the left extended map table temporarily for
// generating the selection predicate for Phase 3.
generator->appendAtEnd(leftExtValMapTable);
// generate afterJoinPred4
newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(),
ex_expr::exp_SCAN_PRED,
&afterJoinPred4);
// This is how the check point is made use of.
generator->removeAll(myMapTableX);
// we just deleted leftExtValMapTable...
// make sure we don't reference it
leftExtValMapTable = NULL;
}
generator->removeLast(); // It should actually remove myMapTableX.
} //if (isLeftJoin() && !selectionPred().isEmpty())
// set the atp for right row values back to 0.
// Set the atp_index to the last returned tupp.
if (rightOutputValIds.entries()) {
for (CollIndex ix = 0; ix < rightOutputValIds.entries(); ix++) {
valId = rightOutputValIds[ix];
// do this only, if the valueId is not input to this node
if (NOT getGroupAttr()->getCharacteristicInputs().contains(valId)) {
MapInfo * map_info = generator->getMapInfoAsIs(valId);
if (map_info) {
Attributes * attr = map_info->getAttr();
attr->setAtp(0);
attr->setAtpIndex(returnedRightRowAtpIndex);
};
};
};
};
// set the atp index for values in the instantiated row.
if (nullInstantiatedOutput().entries()) {
for (CollIndex ix = 0; ix < nullInstantiatedOutput().entries(); ix++) {
valId = nullInstantiatedOutput()[ix];
Attributes * attr = generator->getMapInfo(valId)->getAttr();
attr->setAtp(0);
attr->setAtpIndex(returnedInstRowAtpIndex);
}
};
}; // if ( rightOutputNeeded )
if(useUniqueHashJoin) {
// Add the MapTable for the left child
// unique Hash Join passes the left child values as is (copyAtp()).
//
generator->appendAtEnd(myMapTable3);
} else {
// add the MapTable for the left row
//
generator->appendAtEnd(leftValMapTable);
}
// Here is what the map table list looks like now:
// MT -> MT0 -> RV -> LV
/***************** Generate the nullInstForRightJoinExprs *************/
// generate nullInstForRightJoinExpr. instantiateValuesForRightJoin
// generates 2 expressions. The first one is to evaluate the right
// expression and move the result into the null instantiated row.
// The second one is to initialize the instantiated row with null values.
// instantiateValuesForRightJoin also adds info for the instantiated
// null row to the MapTable.
if (nullInstantiatedForRightJoinOutput().entries() > 0) {
instantiateValuesForRightJoin(generator,
workAtpPos, instRowForRightJoinAtpIndex,
&rightJoinExpr, &nullInstForRightJoinExpr,
&instRowForRightJoinLength,
NULL, // Don't need a MapTable back. At
// this point, we have generated all
// the necessary expressions. This
// code is here to be consistent with
// this one's counterpart
// - instantiateValuesForLeftJoin
tupleFormat);
} // nullInstantiatedForRightJoinOutput()
if (isRightJoin()&& !selectionPred().isEmpty())
{
// Use the check point technique that is used in
// generating the afterJoinPred3 & afterJoinPred4
// for isLeftJoin()
MapTable * myMapTableX = generator->appendAtEnd();
// add the null instantiated columns maptable back
// to generate the after join selection predicate.
// For this expression, the values should all be
// available at instNullForLeftJoinMapTable and
// instRowForLeftJoinAtpIndex
generator->appendAtEnd(instNullForLeftJoinMapTable);
// generate afterJoinPred5
// We only need one predicate here, since the nullInstantiated
// versions of both the left row and the right row are
// available in their respective nullInstantiated tupp.
// Note that the left join case will need both afterJoinPred3 and
// afterJoinPred4.
ItemExpr * newPredTree;
newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&afterJoinPred5);
// This is how the check point is made use of.
generator->removeAll(myMapTableX);
// We just deleted instNullForLeftJoinMapTable, make sure we don't use
// it any more..
instNullForLeftJoinMapTable = NULL;
generator->removeLast(); // It should actually remove myMapTableX.
}
// set the atp for the left row values back to 0
if (leftOutputValIds.entries()) {
for (CollIndex ix = 0; ix < leftOutputValIds.entries(); ix++) {
valId = leftOutputValIds[ix];
MapInfo * map_info =
generator->getMapInfoFromThis(leftValMapTable, valId);
if (map_info) {
Attributes * attr = map_info->getAttr();
attr->setAtp(0);
attr->setAtpIndex(returnedLeftRowAtpIndex);
};
};
};
// set the atp index for values in the instantiated row.
if (nullInstantiatedForRightJoinOutput().entries()) {
for (CollIndex ix = 0; ix < nullInstantiatedForRightJoinOutput().entries(); ix++) {
valId = nullInstantiatedForRightJoinOutput()[ix];
Attributes * attr = generator->getMapInfo(valId)->getAttr();
attr->setAtp(0);
attr->setAtpIndex(returnedInstRowForRightJoinAtpIndex);
}
};
// determine the expected size of the inner table
Cardinality innerExpectedRows = (Cardinality) child(1)->getGroupAttr()->
getOutputLogPropList()[0]->getResultCardinality().value();
// If this HJ is performed within ESPs, then number of records
// processed by each ESP is a subset of total records.
// Inner side -- divide only for type 1 join !! (type 2 join sends all the
// rows to each ESP, and for the rest we assume the same as a worst case).
if ( saveNumEsps > 0 && getParallelJoinType() == 1 )
innerExpectedRows /= (Cardinality) saveNumEsps ;
// determine the expected size of the outer table
Cardinality outerExpectedRows = (Cardinality) child(0)->getGroupAttr()->
getOutputLogPropList()[0]->getResultCardinality().value();
// If this HJ is performed within ESPs, then number of records
// processed by each ESP is a subset of total records.
if ( saveNumEsps > 0 )
outerExpectedRows /= (Cardinality) saveNumEsps ;
// determine the size of the HJ buffers. This hash buffer is used to store
// the incoming (inner or outer) rows, and may be written to disk (overflow)
// first determine the minimum size for the hash table buffers.
// a buffer has to store at least one extended inner or outer row
// plus overhead such as hash buffer header etc
ULng32 minHBufferSize = MAXOF(extLeftRowLength, extRightRowLength) +
ROUND8(sizeof(HashBufferHeader)) + 8;
// determine the minimum result sql buffer size (may need to store result
// rows comprised of both inner and outer incoming rows)
ULng32 minResBufferSize = leftRowLength + sizeof(tupp_descriptor);
if ( rightOutputNeeded )
minResBufferSize += rightRowLength + sizeof(tupp_descriptor);
// get the default value for the (either hash or result) buffer size
ULng32 bufferSize = (ULng32) getDefault(GEN_HSHJ_BUFFER_SIZE);
// currently the default hash buffer size is 56K (DP2 cannot take anything
// larger), so if this Hash-Join may overflow and the input row exceeds
// this size, we issue an error. If overflow is not allowed, then we may
// resize the hash buffer to accommodate the larger input row.
ULng32 hashBufferSize = bufferSize ;
if ( minHBufferSize > bufferSize ) {
// On linux we can handle any size of overflow buffer
hashBufferSize = minHBufferSize ; // use a larger Hash-Buffer
}
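// Illustration (hypothetical numbers, not from any real query): with the
// 56K default and an extended input row of 80,000 bytes, minHBufferSize
// exceeds the default, so hashBufferSize is raised to hold at least that
// one row plus the ROUND8'ed HashBufferHeader and 8 bytes of slack.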
// adjust up the result buffer size, if needed
bufferSize = MAXOF(minResBufferSize, bufferSize);
// determine the memory usage (the amount of memory, as a percentage of
// total physical memory, used to initialize data structures)
unsigned short memUsagePercent =
(unsigned short) getDefault(BMO_MEMORY_USAGE_PERCENT);
// determine the size of the up queue. We should be able to keep at least
// one result buffer's worth of data in the up queue
queue_index upQueueSize = (queue_index)(bufferSize / minResBufferSize);
// we want at least 4 entries in the up queue
upQueueSize = MAXOF(upQueueSize,(queue_index) 4);
// the default entry might be even larger
upQueueSize = MAXOF(upQueueSize, (queue_index)getDefault(GEN_HSHJ_SIZE_UP));
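// Illustration (hypothetical numbers): with bufferSize = 57344 (the 56K
// default) and minResBufferSize = 4096, the up queue starts at
// 57344/4096 = 14 entries; 14 > 4, so assuming GEN_HSHJ_SIZE_UP is no
// larger than 14, the final size stays at 14.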
// Support for a RE-USE of the hash table, in case the input values for
// the right/inner child are the same as in the previous input.
ex_expr * moveInputExpr = 0;
ex_expr * checkInputPred = 0;
ULng32 inputValuesLen = 0;
if ( isReuse() ) { // only if testing is needed
// Create a copy of the input values. This set represents the saved input
// values, which are compared to the incoming values.
// Generate expression to move the relevant input values
if (! moveInputValues().isEmpty() ) {
ValueIdList vid_list( moveInputValues() );
expGen->generateContiguousMoveExpr(vid_list,
0, // don't add conv nodes
workAtpPos,
prevInputTuppIndex,
tupleFormat,
inputValuesLen, &moveInputExpr);
}
// generate expression to see if the relevant input values have changed.
// If changed, then we need to rebuild the hash table.
if (! checkInputValues().isEmpty() ) {
ItemExpr * newPredTree =
checkInputValues().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&checkInputPred);
}
}
if(useUniqueHashJoin) {
// The unique hash join does not use the rightMoveOutExpr,
// however, it was generated to get the mapTable (could use
// processAttributes()).
// Set it to NULL here.
//
rightMoveOutExpr = NULL;
rightRowAtpIndex = -1;
// The Unique Hash Join does not have an extended left row.
// However, extLeftRowLength was set to a non-zero value above.
//
extLeftRowLength = 0;
// Check to make sure things are as we expect for the unique hash join
//
GenAssert(!rightMoveOutExpr, "Bad Unique Hash Join: rightMoveOutExpr");
GenAssert(!leftMoveExpr, "Bad Unique Hash Join: leftMoveExpr");
GenAssert(!leftMoveInExpr, "Bad Unique Hash Join: leftMoveInExpr");
GenAssert(!leftMoveOutExpr, "Bad Unique Hash Join: leftMoveOutExpr");
GenAssert(!probeSearchExpr2, "Bad Unique Hash Join: probeSearchExpr2");
GenAssert(!leftJoinExpr, "Bad Unique Hash Join: leftJoinExpr");
GenAssert(!nullInstForLeftJoinExpr,
"Bad Unique Hash Join: nullInstForLeftJoinExpr");
GenAssert(!beforeJoinPred1, "Bad Unique Hash Join: beforeJoinPred1");
GenAssert(!beforeJoinPred2, "Bad Unique Hash Join: beforeJoinPred2");
GenAssert(!afterJoinPred1, "Bad Unique Hash Join: afterJoinPred1");
GenAssert(!afterJoinPred2, "Bad Unique Hash Join: afterJoinPred2");
GenAssert(!afterJoinPred3, "Bad Unique Hash Join: afterJoinPred3");
GenAssert(!afterJoinPred4, "Bad Unique Hash Join: afterJoinPred4");
GenAssert(leftRowLength == 0, "Bad Unique Hash Join: leftRowLength");
GenAssert(extLeftRowLength == 0, "Bad Unique Hash Join: extLeftRowLength");
GenAssert(instRowLength == 0, "Bad Unique Hash Join: instRowLength");
GenAssert(leftRowAtpIndex == -1, "Bad Unique Hash Join: leftRowAtpIndex");
GenAssert(rightRowAtpIndex == -1, "Bad Unique Hash Join: rightRowAtpIndex");
GenAssert(instRowForLeftJoinAtpIndex == -1,
"Bad Unique Hash Join: instRowForLeftJoinAtpIndex");
GenAssert(returnedLeftRowAtpIndex == -1,
"Bad Unique Hash Join: returnedLeftRowAtpIndex");
GenAssert(returnedInstRowAtpIndex == -1,
"Bad Unique Hash Join: returnedInstRowAtpIndex");
GenAssert(!rightJoinExpr, "Bad Unique Hash Join: rightJoinExpr");
GenAssert(!nullInstForRightJoinExpr,
"Bad Unique Hash Join: nullInstForRightJoinExpr");
GenAssert(instRowForRightJoinAtpIndex == -1,
"Bad Unique Hash Join: instRowForRightJoinAtpIndex");
GenAssert(returnedInstRowForRightJoinAtpIndex == -1,
"Bad Unique Hash Join: returnedInstRowForRightJoinAtpIndex");
GenAssert(instRowForRightJoinLength == 0,
"Bad Unique Hash Join: instRowForRightJoinLength");
}
short scrthreshold = (short) CmpCommon::getDefaultLong(SCRATCH_FREESPACE_THRESHOLD_PERCENT);
short hjGrowthPercent =
getGroupAttr()->getOutputLogPropList()[0]->getBMOgrowthPercent();
// now we have generated all the required expressions and the MapTable
// reflects the returned rows. Let's generate the hash join TDB now
ComTdbHashj * hashj_tdb =
new(space) ComTdbHashj(leftChildTdb,
rightChildTdb,
givenDesc,
returnedDesc,
rightHashExpr,
rightMoveInExpr,
rightMoveOutExpr,
rightSearchExpr,
leftHashExpr,
leftMoveExpr,
leftMoveInExpr,
leftMoveOutExpr,
probeSearchExpr1,
probeSearchExpr2,
leftJoinExpr,
nullInstForLeftJoinExpr,
beforeJoinPred1,
beforeJoinPred2,
afterJoinPred1,
afterJoinPred2,
afterJoinPred3,
afterJoinPred4,
afterJoinPred5,
checkInputPred,
moveInputExpr,
(Lng32)inputValuesLen,
prevInputTuppIndex,
rightRowLength,
extRightRowLength,
leftRowLength,
extLeftRowLength,
instRowLength,
workCriDesc,
leftRowAtpIndex,
extLeftRowAtpIndex,
rightRowAtpIndex,
extRightRowAtpIndex1,
extRightRowAtpIndex2,
hashValueAtpIndex,
instRowForLeftJoinAtpIndex,
returnedLeftRowAtpIndex,
returnedRightRowAtpIndex,
returnedInstRowAtpIndex,
memUsagePercent,
(short)getDefault(GEN_MEM_PRESSURE_THRESHOLD),
scrthreshold,
(queue_index)getDefault(GEN_HSHJ_SIZE_DOWN),
upQueueSize,
isSemiJoin(),
isLeftJoin(),
isAntiSemiJoin(),
useUniqueHashJoin,
(isNoOverflow() ||
(CmpCommon::getDefault(EXE_BMO_DISABLE_OVERFLOW)
== DF_ON)),
isReuse(),
(Lng32)getDefault(GEN_HSHJ_NUM_BUFFERS),
bufferSize,
hashBufferSize,
(Cardinality) getGroupAttr()->
getOutputLogPropList()[0]->
getResultCardinality().value(),
innerExpectedRows,
outerExpectedRows,
isRightJoin(),
rightJoinExpr,
nullInstForRightJoinExpr,
instRowForRightJoinAtpIndex,
returnedInstRowForRightJoinAtpIndex,
instRowForRightJoinLength,
// To get the min number of buffers per flushed
// cluster before it can be flushed again
(unsigned short)
getDefault(EXE_NUM_CONCURRENT_SCRATCH_IOS)
+ (short)getDefault(COMP_INT_66), // for testing
(UInt32) getDefault(COMP_INT_67), // for batch num
//+ (short)getDefault(COMP_INT_66), // for testing
checkInnerNullExpression,
checkOuterNullExpression,
hjGrowthPercent,
// For min/max optimization.
minMaxValsAtpIndex,
minMaxRowLength,
minMaxExpr,
leftDownDesc
);
generator->initTdbFields(hashj_tdb);
hashj_tdb->setOverflowMode(generator->getOverflowMode());
if (CmpCommon::getDefault(EXE_BMO_SET_BUFFERED_WRITES) == DF_ON)
hashj_tdb->setBufferedWrites(TRUE);
if (CmpCommon::getDefault(EXE_DIAGNOSTIC_EVENTS) == DF_ON)
hashj_tdb->setLogDiagnostics(TRUE);
if (useUniqueHashJoin || // UHJ avoids check for early overflow
CmpCommon::getDefault(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_HASH) == DF_ON
// If CQD value is SYSTEM, then no compiler hints checks for HDD
||
(((generator->getOverflowMode() == ComTdb::OFM_DISK ) ||
( generator->getOverflowMode() == ComTdb::OFM_MMAP))
&&
CmpCommon::getDefault(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_HASH) ==
DF_SYSTEM ))
hashj_tdb->setDisableCmpHintsOverflow(TRUE);
hashj_tdb->setBmoMinMemBeforePressureCheck((Int16)getDefault(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB));
if(generator->getOverflowMode() == ComTdb::OFM_SSD )
hashj_tdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
getDefaults()).
getAsLong(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB));
else
hashj_tdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
getDefaults()).
getAsLong(EXE_MEMORY_AVAILABLE_IN_MB));
hashj_tdb->setScratchIOVectorSize((Int16)getDefault(SCRATCH_IO_VECTOR_SIZE_HASH));
hashj_tdb->
setForceOverflowEvery((UInt16)(ActiveSchemaDB()->
getDefaults()).
getAsULong(EXE_TEST_HASH_FORCE_OVERFLOW_EVERY));
hashj_tdb->
setForceHashLoopAfterNumBuffers((UInt16)(ActiveSchemaDB()->
getDefaults()).
getAsULong(EXE_TEST_FORCE_HASH_LOOP_AFTER_NUM_BUFFERS));
hashj_tdb->
setForceClusterSplitAfterMB((UInt16) (ActiveSchemaDB()->getDefaults()).
getAsULong(EXE_TEST_FORCE_CLUSTER_SPLIT_AFTER_MB));
((ComTdb*)hashj_tdb)->setCIFON((tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT));
// The CQD EXE_MEM_LIMIT_PER_BMO_IN_MB has precedence over the mem quota sys
NADefaults &defs = ActiveSchemaDB()->getDefaults();
UInt16 mmu = (UInt16)(defs.getAsDouble(EXE_MEM_LIMIT_PER_BMO_IN_MB));
UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
double memQuota = 0;
if (mmu != 0) {
memQuota = mmu;
hashj_tdb->setMemoryQuotaMB(mmu);
} else {
// Apply the quota system if either one of the following is true:
// 1. the memory limit feature is turned off and there is more than one BMO
// 2. the memory limit feature is turned on
NABoolean mlimitPerCPU = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0;
if ( mlimitPerCPU || numBMOsInFrag > 1 ||
(numBMOsInFrag == 1 && CmpCommon::getDefault(EXE_SINGLE_BMO_QUOTA) == DF_ON)) {
memQuota =
computeMemoryQuota(generator->getEspLevel() == 0,
mlimitPerCPU,
generator->getBMOsMemoryLimitPerCPU().value(),
generator->getTotalNumBMOsPerCPU(),
generator->getTotalBMOsMemoryPerCPU().value(),
numBMOsInFrag,
generator->getFragmentDir()->getBMOsMemoryUsage()
);
Lng32 hjMemoryLowbound = defs.getAsLong(EXE_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN);
if ( memQuota < hjMemoryLowbound )
memQuota = hjMemoryLowbound;
hashj_tdb->setMemoryQuotaMB( UInt16(memQuota) );
}
}
if (beforeJoinPredOnOuterOnly())
hashj_tdb->setBeforePredOnOuterOnly();
generator->addToTotalOverflowMemory(
getEstimatedRunTimeOverflowSize(memQuota)
);
double hjMemEst = getEstimatedRunTimeMemoryUsage(hashj_tdb);
generator->addToTotalEstimatedMemory(hjMemEst);
if ( generator->getRightSideOfFlow() )
hashj_tdb->setPossibleMultipleCalls(TRUE);
// Internal CQD -- if set, enforce a minimum number of clusters
UInt16 nc =
(UInt16)(ActiveSchemaDB()->
getDefaults()).getAsDouble(EXE_HJ_MIN_NUM_CLUSTERS);
if (nc != 0)
hashj_tdb->setNumClusters(nc);
hashj_tdb->setMemoryContingencyMB(getDefault(PHY_MEM_CONTINGENCY_MB));
float bmoCtzFactor;
defs.getFloat(BMO_CITIZENSHIP_FACTOR, bmoCtzFactor);
hashj_tdb->setBmoCitizenshipFactor((Float32)bmoCtzFactor);
Lng32 hjMemEstInKBPerCPU = (Lng32)(hjMemEst / 1024) ;
hjMemEstInKBPerCPU = hjMemEstInKBPerCPU/
(MAXOF(generator->compilerStatsInfo().dop(),1));
hashj_tdb->setHjMemEstInMbPerCpu
( Float32(MAXOF(hjMemEstInKBPerCPU/1024,1)) );
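// Illustration (hypothetical numbers): hjMemEst = 200 MB and a degree of
// parallelism of 8 give 204800/8 = 25600 KB per CPU, reported above as
// 25 MB per CPU.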
// For now, use variable for all CIF rows based on resizeCifRecord
if(resizeCifRecord){ //tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) {
hashj_tdb->setUseVariableLength();
if(considerBufferDefrag)
{
hashj_tdb->setConsiderBufferDefrag();
}
}
if(!generator->explainDisabled()) {
generator->setOperEstimatedMemory(hjMemEstInKBPerCPU);
generator->setExplainTuple(
addExplainInfo(hashj_tdb, leftExplainTuple, rightExplainTuple, generator));
generator->setOperEstimatedMemory(0);
}
hashj_tdb->setReturnRightOrdered( returnRightOrdered() );
// Only for anti-semi-join with no search expr:
// make a guess about the likelihood of getting any row from the right,
// in which case delay requesting left rows, which would then probably
// not be needed.
hashj_tdb->setDelayLeftRequest( innerExpectedRows > 100 ||
outerExpectedRows > 100000 );
// If using the min/max optimization, must delay the left request.
// This is because we send the min/max values with the left request
// and they are not available until phase1 is complete.
if(minMaxExpr)
hashj_tdb->setDelayLeftRequest(true);
// Query limits.
if ((afterJoinPred1 != NULL) || (afterJoinPred2 != NULL))
{
ULng32 joinedRowsBeforePreempt =
(ULng32)getDefault(QUERY_LIMIT_SQL_PROCESS_CPU_XPROD);
if ((joinedRowsBeforePreempt > 0))
hashj_tdb->setXproductPreemptMax(joinedRowsBeforePreempt);
}
// if an Insert/Update/Delete operation exists below the left
// child, we need to turn off a hash join optimization which
// cancels the left side if the inner table is empty
if( child(0)->seenIUD() )
{
hashj_tdb->setLeftSideIUD();
}
// restore the original down cri desc since this node changed it.
generator->setCriDesc(givenDesc, Generator::DOWN);
// set the new up cri desc.
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, hashj_tdb);
// reset the expression generation flag to generate float validation pcode
generator->setGenNoFloatValidatePCode(FALSE);
// reset the handleIndirectVC flag to its initial value
expGen->setHandleIndirectVC( vcflag );
return 0;
}
ExpTupleDesc::TupleDataFormat HashJoin::determineInternalFormat( const ValueIdList & rightList,
const ValueIdList & leftList,
RelExpr * relExpr,
NABoolean & resizeCifRecord,
Generator * generator,
NABoolean bmo_affinity,
NABoolean & considerBufferDefrag,
NABoolean uniqueHJ)
{
RelExpr::CifUseOptions bmo_cif = RelExpr::CIF_SYSTEM;
considerBufferDefrag = FALSE;
if (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_OFF)
{
bmo_cif = RelExpr::CIF_OFF;
resizeCifRecord = FALSE;
return ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
}
UInt32 maxRowSize = 0;
//determine whether we want to defragment the buffers or not based on the average row size
double ratio = CmpCommon::getDefaultNumeric(COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO);
double avgRowSize = getGroupAttr()->getAverageVarcharSize(rightList, maxRowSize);
considerBufferDefrag = ( maxRowSize >0 && avgRowSize/maxRowSize < ratio);
if (!uniqueHJ)
{
avgRowSize = getGroupAttr()->getAverageVarcharSize(leftList, maxRowSize);
considerBufferDefrag = considerBufferDefrag && ( maxRowSize >0 && avgRowSize/maxRowSize < ratio);
}
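// Illustration (hypothetical numbers): with a defrag ratio CQD of 0.3, an
// average varchar row size of 50 bytes and a max row size of 200 bytes,
// 50/200 = 0.25 < 0.3, so defragmentation is considered for that side;
// for a non-unique hash join both children must pass the test.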
if (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_ON)
{
bmo_cif = RelExpr::CIF_ON;
resizeCifRecord = (rightList.hasVarChars() || leftList.hasVarChars());
return ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
}
//CIF_SYSTEM
if (bmo_affinity == TRUE)
{
if (generator->getInternalFormat() == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
resizeCifRecord = (rightList.hasVarChars() || leftList.hasVarChars());
return ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
}
else
{
CMPASSERT(generator->getInternalFormat() == ExpTupleDesc::SQLARK_EXPLODED_FORMAT);
resizeCifRecord = FALSE;
return ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
}
}
ExpTupleDesc::TupleDataFormat lTupleFormat = generator->getInternalFormat();
UInt32 lAlignedHeaderSize= 0;
UInt32 lAlignedVarCharSize = 0;
UInt32 lExplodedLength = 0;
UInt32 lAlignedLength = 0;
double lAvgVarCharUsage = 1;
NABoolean lResizeRecord = FALSE;
ExpTupleDesc::TupleDataFormat rTupleFormat = generator->getInternalFormat();
UInt32 rAlignedHeaderSize= 0;
UInt32 rAlignedVarCharSize = 0;
UInt32 rExplodedLength = 0;
UInt32 rAlignedLength = 0;
double rAvgVarCharUsage = 1;
NABoolean rResizeRecord = FALSE;
rTupleFormat = generator->determineInternalFormat(rightList,
relExpr,
rResizeRecord,
bmo_cif,
bmo_affinity,
rAlignedLength,
rExplodedLength,
rAlignedVarCharSize,
rAlignedHeaderSize,
rAvgVarCharUsage);
lTupleFormat = generator->determineInternalFormat(leftList,
relExpr,
lResizeRecord,
bmo_cif,
bmo_affinity,
lAlignedLength,
lExplodedLength,
lAlignedVarCharSize,
lAlignedHeaderSize,
lAvgVarCharUsage);
if (rTupleFormat == ExpTupleDesc::SQLARK_EXPLODED_FORMAT &&
lTupleFormat == ExpTupleDesc::SQLARK_EXPLODED_FORMAT)
{
resizeCifRecord = FALSE;
return ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
}
UInt32 rAlignedNonVarSize = rAlignedLength - rAlignedVarCharSize;
UInt32 lAlignedNonVarSize = lAlignedLength - lAlignedVarCharSize;
if (rTupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT &&
lTupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
resizeCifRecord = (rResizeRecord || lResizeRecord);
return ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
}
// at this point one is aligned, the other is exploded
double cifRowSizeAdj = CmpCommon::getDefaultNumeric(COMPRESSED_INTERNAL_FORMAT_ROW_SIZE_ADJ);
double lEstRowCount = 1;
double rEstRowCount = 1;
lEstRowCount = child(0)->getGroupAttr()->getResultCardinalityForEmptyInput().value();
rEstRowCount = child(1)->getGroupAttr()->getResultCardinalityForEmptyInput().value();
if ( (rAlignedVarCharSize > rAlignedHeaderSize ||
lAlignedVarCharSize > lAlignedHeaderSize) &&
(((rAlignedNonVarSize + rAvgVarCharUsage * rAlignedVarCharSize ) * rEstRowCount +
(lAlignedNonVarSize + lAvgVarCharUsage * lAlignedVarCharSize ) * lEstRowCount) <
(rExplodedLength * rEstRowCount + lExplodedLength * lEstRowCount) * cifRowSizeAdj))
{
resizeCifRecord = (rResizeRecord || lResizeRecord);
return ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
}
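// Illustration (hypothetical numbers, right side only for simplicity):
// with rAlignedNonVarSize = 40, rAlignedVarCharSize = 100,
// rAvgVarCharUsage = 0.3 and rExplodedLength = 150 over 1000 rows, the
// aligned cost is (40 + 0.3*100) * 1000 = 70,000 bytes vs. an exploded
// cost of 150,000 bytes (scaled by cifRowSizeAdj), so the aligned format
// is chosen, and resizeCifRecord is set if either side has varchars.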
resizeCifRecord = FALSE;
return ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
}
CostScalar HashJoin::getEstimatedRunTimeMemoryUsage(NABoolean perCPU)
{
GroupAttributes * childGroupAttr = child(1).getGroupAttr();
const CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength();
const CostScalar childRowCount = child(1).getPtr()->getEstRowsUsed();
// Each record also uses a header (HashRow) in memory (8 bytes for 32-bit).
// Hash tables also take memory -- they are about 50% longer than the
// number of entries.
const ULng32
memOverheadPerRecord = sizeof(HashRow) + sizeof(HashTableHeader) * 3 / 2 ;
CostScalar totalHashTableMemory =
childRowCount * (childRecordSize + memOverheadPerRecord);
// one buffer for the outer table
totalHashTableMemory += ActiveSchemaDB()->getDefaults().getAsLong(GEN_HSHJ_BUFFER_SIZE);
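// Illustration (hypothetical numbers, not from any real query): 1,000,000
// inner rows of 100 bytes each, with ~20 bytes of per-record overhead,
// give roughly 1,000,000 * 120 = 120 million bytes of hash table memory,
// plus the one GEN_HSHJ_BUFFER_SIZE buffer added above for the outer table.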
const PhysicalProperty* const phyProp = getPhysicalProperty() ;
Lng32 numOfStreams = 1;
PartitioningFunction * partFunc = NULL;
if (phyProp)
{
partFunc = phyProp -> getPartitioningFunction() ;
numOfStreams = partFunc->getCountOfPartitions();
if ( partFunc -> isAReplicationPartitioningFunction() == TRUE )
{
totalHashTableMemory *= numOfStreams;
}
}
if ( perCPU == TRUE ) {
totalHashTableMemory /= numOfStreams;
}
return totalHashTableMemory;
}
double HashJoin::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
{
CostScalar totalHashTableMemory = getEstimatedRunTimeMemoryUsage(FALSE);
double memoryLimitPerCpu;
ULng32 memoryQuotaInMB = ((ComTdbHashj *)tdb)->memoryQuotaMB();
if (memoryQuotaInMB)
memoryLimitPerCpu = memoryQuotaInMB * 1024 * 1024 ;
else
{
memoryLimitPerCpu =
ActiveSchemaDB()->getDefaults().getAsLong(EXE_MEMORY_AVAILABLE_IN_MB) * 1024 * 1024 ;
}
Lng32 numOfStreams = 1;
const PhysicalProperty* const phyProp = getPhysicalProperty() ;
if (phyProp)
{
PartitioningFunction * partFunc = phyProp -> getPartitioningFunction() ;
numOfStreams = partFunc->getCountOfPartitions();
}
CostScalar memoryPerCpu = totalHashTableMemory/numOfStreams ;
if ( memoryPerCpu > memoryLimitPerCpu )
{
memoryPerCpu = memoryLimitPerCpu;
}
totalHashTableMemory = memoryPerCpu * numOfStreams ;
return totalHashTableMemory.value();
}
double HashJoin::getEstimatedRunTimeOverflowSize(double memoryQuotaMB)
{
// Set up the overflow size for this join with the formula
// ov = ((s1/p - m)/s1) * (s0+s1) * p, where s0 = size of child0 (outer),
// s1 = size of child1 (inner), p = the number of pipelines, and
// m = the memory quota for this HJ.
//
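// Illustration (hypothetical numbers, one pipeline): s0 = 100 MB,
// s1 = 500 MB, m = 100 MB => delta = s1 - m = 400 MB and
// ov = (400/500) * (100 + 500) = 480 MB of projected overflow.
//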
if ( memoryQuotaMB > 0 ) {
GroupAttributes * c0 = child(0).getGroupAttr();
double c0RLen = c0->getCharacteristicOutputs().getRowLength();
double c0Rows = (child(0).getPtr()->getEstRowsUsed()).getValue();
GroupAttributes * c1 = child(1).getGroupAttr();
double c1RLen = c1->getCharacteristicOutputs().getRowLength();
double c1Rows = (child(1).getPtr()->getEstRowsUsed()).getValue();
double s0 = c0RLen * c0Rows;
double s1 = c1RLen * c1Rows;
Lng32 pipelines = 1;
const PhysicalProperty* const phyProp = getPhysicalProperty() ;
if (phyProp)
{
PartitioningFunction * partFunc = phyProp -> getPartitioningFunction() ;
if ( partFunc )
pipelines = partFunc->getCountOfPartitions();
}
double delta = s1 / pipelines - memoryQuotaMB * COM_ONE_MEG ;
if ( delta > 0 ) {
double ov = ((delta / s1) * (s0 + s1)) * pipelines;
return ov;
}
}
return 0;
}
// NABoolean HashJoin::canUseUniqueHashJoin()
// Decide if this join can use the Unique Hash Join option. This
// option can be significantly faster than the regular hash join, but
// does not support many features. First, the unique hash join does
// not support overflow, so we must ensure that the inner side can fit
// in memory. The Unique Hash Join does not support:
// - Overflow
// - Outer joins (left, right or full)
// - selection or join predicates
// - anti semi join
//
// The Unique Hash Join only supports
// - unique joins (at most one row per probe; semi joins are the exception)
// Note that the method rowsFromLeftHaveUniqueMatch() used below
// will return TRUE for Semi-Joins regardless of the uniqueness of
// the join keys
//
// - joins with equi join predicates (cross products are not supported)
//
// Semi joins are supported by the Unique Hash Join even if the inner
// table contains duplicates. It actually does this naturally with no
// special code for semi joins. This works because the unique hash
// join implementation expects to find only one match and will not
// look for additional matches after finding the first. This is the
// exact behavior required by the semi join. The Unique Hash Join
// could eliminate these duplicates in the build phase, but does not
// currently do this.
//
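// Illustration (hypothetical schema): a join such as
//   SELECT ... FROM orders o JOIN customer c ON o.custkey = c.custkey
// where c.custkey is unique qualifies: each left row has at most one
// match, there are no outer joins and no extra join/selection
// predicates, so the lighter-weight unique hash join applies if the
// inner table is small enough to fit in memory.
//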
NABoolean HashJoin::canUseUniqueHashJoin()
{
// Do not use Unique Hash Join if it is turned OFF
if(CmpCommon::getDefault(UNIQUE_HASH_JOINS) == DF_OFF)
{
return FALSE;
}
if(!isLeftJoin() &&
!isRightJoin() &&
joinPred().isEmpty() &&
selectionPred().isEmpty() &&
!isAntiSemiJoin() &&
rowsFromLeftHaveUniqueMatch() &&
!getEquiJoinPredicates().isEmpty() &&
(CmpCommon::context()->internalCompile() != CmpContext::INTERNAL_MODULENAME) &&
!(CmpCommon::statement()->isSMDRecompile())
)
{
// If Unique Hash Joins are turned ON, use for all that qualify
// regardless of cardinalities.
//
if(CmpCommon::getDefault(UNIQUE_HASH_JOINS) == DF_ON)
{
return TRUE;
}
// Otherwise, for UNIQUE_HASH_JOINS == 'SYSTEM', decide based on
// cardinalities.
// Make sure Inner side of join is suitable for Unique Hash Join.
//
GroupAttributes *rightChildGA = child(1)->getGroupAttr();
if(rightChildGA->getGroupAnalysis())
{
const CANodeIdSet &nodes =
rightChildGA->getGroupAnalysis()->getAllSubtreeTables();
UInt32 innerTables =
(UInt32) getDefault(UNIQUE_HASH_JOIN_MAX_INNER_TABLES);
// Default is 1GB
UInt32 innerTableSizeLimitInMB =
(UInt32) getDefault(UNIQUE_HASH_JOIN_MAX_INNER_SIZE);
// Default is 100MB
UInt32 innerTableSizePerInstanceLimitInMB =
(UInt32) getDefault(UNIQUE_HASH_JOIN_MAX_INNER_SIZE_PER_INSTANCE);
RowSize rowSize = rightChildGA->getRecordLength();
// The extended size
rowSize += sizeof(HashRow);
// The hash table entries are rounded up.
// So do the same here.
//
rowSize = ROUND8(rowSize);
Lng32 numPartitions = 1;
if(getParallelJoinType() == 1)
{
const PhysicalProperty* physProp = child(1)->getPhysicalProperty();
PartitioningFunction *partFunc = physProp->getPartitioningFunction();
numPartitions = partFunc->getCountOfPartitions();
}
if(nodes.entries() == 1)
{
TableAnalysis *tabAnalysis =
nodes.getFirst().getNodeAnalysis()->getTableAnalysis();
if(tabAnalysis)
{
CostScalar numRows = tabAnalysis->getCardinalityOfBaseTable();
CostScalar estRows = child(1)->getEstRowsUsed();
if(numRows >= estRows)
{
CostScalar innerTableSize = numRows * rowSize;
CostScalar innerTableSizePerInstance = innerTableSize /
numPartitions;
// Convert to MBs
innerTableSize /= (1024 * 1024);
innerTableSizePerInstance /= (1024 * 1024);
if(innerTableSize < innerTableSizeLimitInMB &&
innerTableSizePerInstance < innerTableSizePerInstanceLimitInMB)
{
// Use the Unique Hash Join Implementation.
//
return TRUE;
}
}
}
}
// single inner table did not qualify for unique hash join.
// give unique hash join another chance using max cardinality
// estimate to determine if inner hash table fits in memory.
if (nodes.entries() <= innerTables)
{
CostScalar maxRows = rightChildGA->
getResultMaxCardinalityForEmptyInput();
CostScalar innerTableMaxSize = maxRows * rowSize
/ (1024 * 1024);
CostScalar innerTableMaxSizePerInstance = innerTableMaxSize /
numPartitions;
if (innerTableMaxSize < innerTableSizeLimitInMB &&
innerTableMaxSizePerInstance < innerTableSizePerInstanceLimitInMB)
// Use the Unique Hash Join Implementation.
//
return TRUE;
}
}
}
return FALSE;
}
// case of hash anti semi join optimization (NOT IN)
// add/build expressions to detect inner and outer nulls:
// checkOuterNullExpr_ : <outer> IS NULL
// checkInnerNullExpr_ : <inner> IS NULL
void HashJoin::addCheckNullExpressions(CollHeap * wHeap)
{
if(!getIsNotInSubqTransform() )
{
return;
}
ValueId valId;
Int32 notinCount = 0;
for ( valId = getEquiJoinPredicates().init();
getEquiJoinPredicates().next(valId);
getEquiJoinPredicates().advance(valId))
{
ItemExpr * itemExpr = valId.getItemExpr();
if ((itemExpr->getOperatorType() == ITM_EQUAL) &&
((BiRelat *)itemExpr)->getIsNotInPredTransform())
{
notinCount++;
ItemExpr * child0 = itemExpr->child(0);
ItemExpr * child1 = itemExpr->child(1);
const NAType &outerType = child0->getValueId().getType();
const NAType &innerType = child1->getValueId().getType();
if (innerType.supportsSQLnull() &&
!((BiRelat*)itemExpr)->getInnerNullFilteringDetected())
{
ItemExpr * itm = new (wHeap) UnLogic(ITM_IS_NULL, child1);
itm->synthTypeAndValueId(TRUE);
getCheckInnerNullExpr().insert(itm->getValueId());
// reuse is disabled in this phase on not in optimization
setReuse(FALSE);
}
//outer column
if (outerType.supportsSQLnull() &&
!((BiRelat*)itemExpr)->getOuterNullFilteringDetected())
{
ItemExpr * itm = new (wHeap) UnLogic(ITM_IS_NULL, child0);
itm->synthTypeAndValueId(TRUE);
getCheckOuterNullExpr().insert(itm->getValueId());
}
}
}
// assert if more than one notin found
DCMPASSERT(notinCount == 1);
}
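// Illustration (hypothetical query): for
//   SELECT ... FROM t1 WHERE a NOT IN (SELECT b FROM t2)
// the transformed anti semi join carries the NOT IN equi predicate
// a = b; if b is nullable (and no filtering was detected) we add
// "b IS NULL" to checkInnerNullExpr_, and if a is nullable we add
// "a IS NULL" to checkOuterNullExpr_, since a single NULL on either
// side changes the NOT IN result.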
//10-060710-7606(port of 10-050706-9430) - Begin
// This function was Reimplemented (7/1/08) soln 10-080518-3261
// This function recursively navigates the item expression
// tree to collect all the value ids needed for expression
// evaluation.
void gatherValuesFromExpr(ValueIdList &vs,
ItemExpr *ie,
Generator *generator)
{
ValueId valId(ie->getValueId());
if(generator->getMapInfoAsIs(valId))
{
// If it is directly in the mapTable record then it is an
// output of the right child.
vs.insert(valId);
}
else
{
// Check for a special case like a CAST(<x>) where <x> is
// available in the MapTable.
for (Int32 i=0; i < ie->getArity(); i++)
{
gatherValuesFromExpr(vs, ie->child(i), generator);
}
}
}
short MergeJoin::codeGen(Generator * generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space = generator->getSpace();
MapTable * my_map_table = generator->appendAtEnd();
// set flag to enable pcode for indirect varchar
NABoolean vcflag = exp_gen->handleIndirectVC();
if (CmpCommon::getDefault(VARCHAR_PCODE) == DF_ON) {
exp_gen->setHandleIndirectVC( TRUE );
}
NABoolean is_semijoin = isSemiJoin();
NABoolean is_leftjoin = isLeftJoin();
NABoolean is_anti_semijoin = isAntiSemiJoin();
// find out if the left child and/or the right child will have at most
// one matching row. If so, a faster merge join implementation
// will be used at runtime.
// This optimization is not used for left or semi joins.
NABoolean isLeftUnique = FALSE;
NABoolean isRightUnique = FALSE;
NABoolean fastMJEval = FALSE;
if ((! is_semijoin) &&
(! is_anti_semijoin) &&
(! is_leftjoin))
{
#ifdef _DEBUG
isLeftUnique = (getenv("LEFT_UNIQUE_MJ") ? TRUE : leftUnique());
isRightUnique = (getenv("RIGHT_UNIQUE_MJ") ? TRUE : rightUnique());
#else
isLeftUnique = leftUnique();
isRightUnique = rightUnique();
#endif
if (isLeftUnique || isRightUnique)
fastMJEval = TRUE;
}
ex_expr * merge_expr = 0;
ex_expr * comp_expr = 0;
ex_expr * pre_join_expr = 0;
ex_expr * post_join_expr = 0;
ex_expr * left_check_dup_expr = 0;
ex_expr * right_check_dup_expr = 0;
ex_expr * left_encoded_key_expr = NULL;
ex_expr * right_encoded_key_expr = NULL;
////////////////////////////////////////////////////////////////////////////
//
// Layout of row returned by this node.
//
// |------------------------------------------------------------------------|
// | input data | left child's data | right child's data | instantiated row |
// | ( I tupps) | ( L tupps ) | ( R tupp ) | ( 1 tupp ) |
// |------------------------------------------------------------------------|
//
// input data: the atp input to this node by its parent. This is given
// to both children as input.
// left child data: tupps appended by the left child
// right child data: tupps appended by the right child
// instantiated row: For some left join cases, the
// null values are instantiated. See proc
// Join::instantiateValuesForLeftJoin for details at the
// end of this file.
//
// Returned row to parent contains:
//
// I + L tupps, if this is a semi join. Rows from right are not returned.
//
// If this is not a semi join, then:
// I + L + R tupps, if instantiation is not done.
// I + L + R + 1 tupps, if instantiation is done.
//
////////////////////////////////////////////////////////////////////////////
ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);
generator->setCriDesc(given_desc, Generator::DOWN);
child(0)->codeGen(generator);
ComTdb * child_tdb1 = (ComTdb *)(generator->getGenObj());
ExplainTuple *leftExplainTuple = generator->getExplainTuple();
ex_cri_desc * left_child_desc = generator->getCriDesc(Generator::UP);
generator->setCriDesc(left_child_desc, Generator::DOWN);
child(1)->codeGen(generator);
ComTdb * child_tdb2 = (ComTdb *)(generator->getGenObj());
ExplainTuple *rightExplainTuple = generator->getExplainTuple();
ex_cri_desc * right_child_desc = generator->getCriDesc(Generator::UP);
unsigned short returned_tuples;
short returned_instantiated_row_atp_index = -1;
short returned_right_row_atp_index = -1;
ex_cri_desc * work_cri_desc = NULL;
short instantiated_row_atp_index = -1;
short encoded_key_atp_index = -1;
work_cri_desc = new(space) ex_cri_desc(3, space);
if (is_semijoin || is_anti_semijoin)
returned_tuples = left_child_desc->noTuples();
else
{
// not a semi join.
// if the right side can return at most one row, then there is no
// need to save dups. No new row is created at returned_right_row_atp_index
// in this case.
if (fastMJEval)
returned_tuples = right_child_desc->noTuples();
else
{
returned_tuples = (unsigned short)(left_child_desc->noTuples() + 1);
returned_right_row_atp_index = returned_tuples - 1;
}
if (nullInstantiatedOutput().entries() > 0)
{
instantiated_row_atp_index = 3;
returned_instantiated_row_atp_index = (short) returned_tuples++;
}
}
ex_cri_desc * returned_desc = new(space) ex_cri_desc(returned_tuples, space);
GenAssert(!getOrderedMJPreds().isEmpty(),"getOrderedMJPreds().isEmpty()");
////////////////////////////////////////////////////////////
// Before generating any expression for this node, set the
// expression generation flag not to generate float
// validation PCode. This is to speed up PCode evaluation
////////////////////////////////////////////////////////////
generator->setGenNoFloatValidatePCode(TRUE);
NABoolean doEncodedKeyCompOpt = TRUE;
encoded_key_atp_index = 2;
// generate expressions to find out if left or right rows are duplicate
// of the previous rows.
ValueIdSet right_dup_val_id_set;
ValueIdList leftChildOfMJPList;//list of left children of orderedMJPreds()
ValueIdList rightChildOfMJPList;//list of right children of orderedMJPreds()
ValueId val_id;
CollIndex i;
for (i = 0; i < orderedMJPreds().entries(); i++)
{
// create a place holder node to represent the previous row,
// which is exactly the same as
// the child values except for its atp. At runtime, the previous
// value is passed to the expression evaluator as the second atp.
// Usually, the child RelExpr values are the immediate children
// of the orderedMJPreds. However, in some cases an immediate
// child is an expression made up of values coming from the
// child RelExpr. Typically, this is a Cast expression
// introduced by a MapValueId node. Here, we handle the usual
// case and the case of the Cast expression. Other expressions
// that are not evaluated by the child RelExpr will result in an
// Assertion. If we ever trigger this assertion, we may need to
// make this code more general.
val_id = orderedMJPreds()[i];
// Do the right row.
ValueId child1Vid =
val_id.getItemExpr()->child(1)->castToItemExpr()->getValueId();
// Place holder for right values.
//
Cast *ph = NULL;
// Attributes of the right child.
//
Attributes *childAttr = NULL;
// ValueId of value actually supplied by child RelExpr.
//
ValueId childOutputVid;
MapInfo *child1MapInfo = generator->getMapInfoAsIs(child1Vid);
// If there is no mapInfo for the immediate child, then it is
// not directly supplied by the child RelExpr. Check for the
// case when the immediate child is a CAST and the child of the
// CAST is supplied directly by the child RelExpr.
//
if(!child1MapInfo)
{
// If this is a CAST AND ...
//
if((child1Vid.getItemExpr()->getOperatorType() == ITM_CAST) ||
(child1Vid.getItemExpr()->getOperatorType() == ITM_TRANSLATE) ||
(child1Vid.getItemExpr()->getOperatorType() == ITM_NOTCOVERED))
{
ValueId nextChild0Vid =
child1Vid.getItemExpr()->child(0)->castToItemExpr()->getValueId();
// If the child of the CAST is in the mapTable (supplied by
// the child RelExpr)
//
if (generator->getMapInfoAsIs(nextChild0Vid))
{
// Remember the actual value supplied by the child RelExpr.
//
childOutputVid = nextChild0Vid;
// Create place holder node.
//
ph = new (generator->wHeap())
Cast(child1Vid.getItemExpr()->child(0),
&(nextChild0Vid.getType()));
// Attributes for this place holder node. Same as the
// child value, but we will later change the ATP.
//
childAttr = generator->getMapInfo(nextChild0Vid)->getAttr();
}
}
}
else
{
// The immediate child is supplied by the child RelExpr.
//
// Remember the actual value supplied by the child RelExpr.
//
childOutputVid = child1Vid;
// Create place holder node.
//
ph = new(generator->wHeap())
Cast(val_id.getItemExpr()->child(1),
&(child1Vid.getType()));
// Attributes for this place holder node. Same as the
// child value, but we will later change the ATP.
//
childAttr = generator->getMapInfo(child1Vid)->getAttr();
}
// If we did not find a childAttr, then neither the immediate
// child nor the child of an immediate CAST is supplied by the
// child RelExpr. We need to be more general here.
//
GenAssert(childAttr, "Merge Join: expression not found");
ph->bindNode(generator->getBindWA());
if ( childAttr->getAtpIndex() > 1)
{
// Make a mapTable entry for the place holder, just like the
// child value
//
MapInfo * map_info = generator->addMapInfo(ph->getValueId(),
childAttr);
// Make this mapTable entry refer to ATP 1.
//
map_info->getAttr()->setAtp(1);
// mark ph as code generated node since we don't want the
// Cast to actually do a conversion.
map_info->codeGenerated();
}
if(!child1MapInfo)
{
// If the immediate child is not supplied by the child RelExpr
// and it is a CAST node, we need to add the equivalent CAST
// node for the left side of the DUP expression.
// Here is the expression we need to generate in this case:
// EQUAL
// |
// /------------\
// CAST CAST must create equiv node here
// | |
// supplied by Child A PlaceHolder-For-A
//
ph =
new(generator->wHeap()) Cast(ph,
&(child1Vid.getType()));
}
BiRelat * bi_relat =
new(generator->wHeap()) BiRelat(ITM_EQUAL,
val_id.getItemExpr()->child(1),
ph);
// for the purpose of checking duplicates, nulls are equal
// to other nulls. Mark them so.
bi_relat->setSpecialNulls(-1);
bi_relat->bindNode(generator->getBindWA());
leftChildOfMJPList.insert(val_id.getItemExpr()->child(0)->getValueId());
// This must be the actual values supplied by Child RelExpr.
rightChildOfMJPList.insert(childOutputVid);
right_dup_val_id_set.insert(bi_relat->getValueId());
if ((val_id.getItemExpr()->child(0)->getValueId().getType().supportsSQLnull()) ||
(val_id.getItemExpr()->child(1)->getValueId().getType().supportsSQLnull()))
doEncodedKeyCompOpt = FALSE;
}
// now generate an expression to see if the left values are less
// than the right values. This is needed to advance the left child
// if the expression is true.
// Note: later, only generate one expression for merge and comp
// and have it return status indicating if the left row is less than,
// equal to or greater than the right. Use a CASE statement to do that.
ExprValueId left_tree = (ItemExpr *) NULL;
ExprValueId right_tree = (ItemExpr *) NULL;
ItemExprTreeAsList * left_list = NULL;
ItemExprTreeAsList * right_list = NULL;
ValueIdList leftEncodedValIds;
ValueIdList rightEncodedValIds;
ULng32 encoded_key_len = 0;
if (NOT doEncodedKeyCompOpt)
{
left_list = new(generator->wHeap()) ItemExprTreeAsList(
&left_tree,
ITM_ITEM_LIST,
RIGHT_LINEAR_TREE);
right_list = new(generator->wHeap()) ItemExprTreeAsList(
&right_tree,
ITM_ITEM_LIST,
RIGHT_LINEAR_TREE);
}
for (i = 0; i < orderedMJPreds().entries(); i++)
{
val_id = orderedMJPreds()[i];
ItemExpr * left_val = val_id.getItemExpr()->child(0);
ItemExpr * right_val = val_id.getItemExpr()->child(1);
// if the left and right values do not have the same type,
// then convert them to a common super type before encoding.
const NAType & leftType = left_val->getValueId().getType();
const NAType & rightType = right_val->getValueId().getType();
if (NOT (leftType == rightType))
{
UInt32 flags =
((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON)
? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0);
// find the common super datatype
const NAType *resultType =
leftType.synthesizeType(
SYNTH_RULE_UNION,
leftType,
rightType,
generator->wHeap(),
&flags);
CMPASSERT(resultType);
// add type conversion operators if necessary
if (NOT (leftType == *resultType))
{
left_val = new(generator->wHeap()) Cast(left_val,resultType);
}
if (NOT (rightType == *resultType))
{
right_val = new(generator->wHeap()) Cast(right_val,resultType);
}
}
// encode the left and right values before doing the comparison.
short desc_flag = FALSE;
if (getLeftSortOrder()[i].getItemExpr()->getOperatorType() == ITM_INVERSE)
desc_flag = TRUE;
CompEncode * encoded_left_val
= new(generator->wHeap()) CompEncode(left_val, desc_flag);
CompEncode * encoded_right_val
= new(generator->wHeap()) CompEncode(right_val, desc_flag);
encoded_left_val->bindNode(generator->getBindWA());
encoded_right_val->bindNode(generator->getBindWA());
if (doEncodedKeyCompOpt)
{
leftEncodedValIds.insert(encoded_left_val->getValueId());
rightEncodedValIds.insert(encoded_right_val->getValueId());
}
else
{
// add the search condition
left_list->insert(encoded_left_val);
right_list->insert(encoded_right_val);
}
}
ItemExpr * compTree = 0;
if (NOT doEncodedKeyCompOpt)
{
compTree = new(generator->wHeap()) BiRelat(ITM_LESS, left_tree, right_tree);
compTree = new(generator->wHeap()) BoolResult(compTree);
// bind/type propagate the comp tree
compTree->bindNode(generator->getBindWA());
}
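// Conceptual sketch of the non-optimized comparison above: for keys
// (a, b) it builds BoolResult(LESS(ENCODE(l.a)||ENCODE(l.b),
// ENCODE(r.a)||ENCODE(r.b))), relying on the order-preserving property
// of CompEncode so that a byte-wise comparison of the concatenated
// encodings orders the composite key correctly (descending key columns
// are encoded inverted via desc_flag).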
// At runtime when this merge join expression is evaluated, the left child
// values are passed in the first atp, and the right child values
// are passed in the second atp. Change the atp value of right child's output
// to 1, if it is not already an input value.
const ValueIdSet & child1CharacteristicOutputs =
child(1)->castToRelExpr()->getGroupAttr()->getCharacteristicOutputs();
// create a list of all values returned from right child that are
// not input to this node.
ValueIdList rightChildOutput;
for (val_id = child1CharacteristicOutputs.init();
child1CharacteristicOutputs.next(val_id);
child1CharacteristicOutputs.advance(val_id))
{
// If it is part of my input or not in the mapTable... The
// assumption is that if it is not in the mapTable and it is not
// directly in the inputs, it can be derived from the inputs
//
if (! getGroupAttr()->getCharacteristicInputs().contains(val_id))
{
// This new function takes care of the CAST(<x>) function as well
gatherValuesFromExpr(rightChildOutput, val_id.getItemExpr(), generator);
}
}
// Change the atp value of right child's output to 1
// Atpindex of -1 means leave atpindex as is.
//
exp_gen->assignAtpAndAtpIndex(rightChildOutput, 1, -1);
ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
ItemExpr * newPredTree = NULL;
if (NOT doEncodedKeyCompOpt)
{
// orderedMJPreds() is a list containing predicates of the form:
// <left value1> '=' <right value1>, <left value2> '=' <right value2> ...
// Generate the merge expression. This is used to find out if
// the left and right rows match for equi join.
newPredTree = orderedMJPreds().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&merge_expr);
// generate the comp expression. This expression is used
// to look for a matching value in the hash table.
exp_gen->generateExpr(compTree->getValueId(), ex_expr::exp_SCAN_PRED,
&comp_expr);
}
else
{
// generate expression to create encoded left key buffer.
// The work atp where encoded is created is passed in as atp1 at runtime.
exp_gen->generateContiguousMoveExpr(leftEncodedValIds,
0, // don't add convert nodes
1, // atp 1
encoded_key_atp_index,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
encoded_key_len,
&left_encoded_key_expr,
0,
ExpTupleDesc::SHORT_FORMAT);
// generate expression to create encoded right key buffer
// The work atp where encoded is created is passed in as atp0 at runtime.
exp_gen->generateContiguousMoveExpr(rightEncodedValIds,
0, // don't add convert nodes
0, // atp 0
encoded_key_atp_index,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
encoded_key_len,
&right_encoded_key_expr,
0,
ExpTupleDesc::SHORT_FORMAT);
}
/*
// generate expression to evaluate the
// non-equi join predicates applied before NULL-instantiation
if (! joinPred().isEmpty())
{
ItemExpr * newPredTree;
newPredTree = joinPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&pre_join_expr);
}
*/
// Change the atp value of right child's output to 0
// The atpindex argument of -1 means leave the atpindex as is.
exp_gen->assignAtpAndAtpIndex(rightChildOutput, 0, -1);
// generate expression to save the duplicate rows returned from right
// child. Do it if rightUnique is false.
ex_expr * right_copy_dup_expr = NULL;
ULng32 right_row_len = 0;
if (NOT fastMJEval)
{
ValueIdList resultValIdList;
exp_gen->generateContiguousMoveExpr(rightChildOutput,
-1, // add convert nodes
1 /*atp*/, 2 /*atpindex*/,
tupleFormat,
right_row_len,
&right_copy_dup_expr,
NULL, ExpTupleDesc::SHORT_FORMAT,
NULL,
&resultValIdList);
ValueIdList prevRightValIds; // the second operand of the dup comparison
CollIndex i = 0;
for (i = 0; i < resultValIdList.entries(); i++)
{
// create the new item expression
ItemExpr * newCol = new(generator->wHeap())
NATypeToItem((NAType *)&(resultValIdList[i].getType()));
newCol->synthTypeAndValueId();
// copy the attributes
Attributes * originalAttr =
generator->getMapInfo(resultValIdList[i])->getAttr();
Attributes * newAttr =
generator->addMapInfo(newCol->getValueId(), 0)->getAttr();
newAttr->copyLocationAttrs(originalAttr);
// set atp
newAttr->setAtp(1);
// add the new valueId to the list of 2nd operand
prevRightValIds.insert(newCol->getValueId());
}
// At runtime, duplicate right rows in right child up queue are checked
// by comparing that row with one of the saved right dup rows.
ValueIdSet right_dup_val_id_set;
for (i = 0; i < rightChildOfMJPList.entries(); i++)
{
val_id = rightChildOfMJPList[i];
CollIndex index = rightChildOutput.index(val_id);
BiRelat * bi_relat =
new(generator->wHeap()) BiRelat(ITM_EQUAL,
rightChildOfMJPList[i].getItemExpr(),
prevRightValIds[index].getItemExpr());
// for the purpose of checking duplicates, nulls are equal
// to other nulls. Mark them so.
bi_relat->setSpecialNulls(-1);
bi_relat->bindNode(generator->getBindWA());
right_dup_val_id_set.insert(bi_relat->getValueId());
}
// generate expressions to do the duplicate row checks for right child.
// The row returned from right child is compared to the saved row returned
// by the right child.
newPredTree = right_dup_val_id_set.rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&right_check_dup_expr);
for (i = 0; i < resultValIdList.entries(); i++)
{
ValueId resultValId = resultValIdList[i];
ValueId rightChildOutputValId = rightChildOutput[i];
Attributes * resultAttr = generator->getMapInfo(resultValId)->getAttr();
Attributes * rightChildAttr = generator->getMapInfo(rightChildOutputValId)->getAttr();
Int32 rightChildAtpIndex = rightChildAttr->getAtpIndex();
rightChildAttr->copyLocationAttrs(resultAttr);
}
// at runtime, duplicate left rows are checked by comparing the
// left row with one of the saved right dup rows.
ValueIdSet left_dup_val_id_set;
for (i = 0; i < rightChildOfMJPList.entries(); i++)
{
val_id = rightChildOfMJPList[i];
CollIndex index = rightChildOutput.index(val_id);
BiRelat * bi_relat =
new(generator->wHeap()) BiRelat(ITM_EQUAL,
leftChildOfMJPList[i].getItemExpr(),
resultValIdList[index].getItemExpr());
// for the purpose of checking duplicates, nulls are equal
// to other nulls. Mark them so.
bi_relat->setSpecialNulls(-1);
bi_relat->bindNode(generator->getBindWA());
left_dup_val_id_set.insert(bi_relat->getValueId());
}
// generate expressions to do the duplicate row checks for left child.
// The row returned from left child is compared to the saved row returned
// by the right child.
newPredTree = left_dup_val_id_set.rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&left_check_dup_expr);
}
// generate expression to evaluate the
// non-equi join predicates applied before NULL-instantiation
if (! joinPred().isEmpty())
{
ItemExpr * newPredTree;
newPredTree = joinPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&pre_join_expr);
}
// now change the atp to 0 and atpindex to returned_right_row_atp_index.
if (NOT fastMJEval)
{
exp_gen->assignAtpAndAtpIndex(rightChildOutput, 0, returned_right_row_atp_index);
}
ex_expr * lj_expr = 0;
ex_expr * ni_expr = 0;
ULng32 rowlen = 0;
if (nullInstantiatedOutput().entries() > 0)
{
instantiateValuesForLeftJoin(generator,
0, returned_instantiated_row_atp_index,
&lj_expr, &ni_expr,
&rowlen,
NULL // No MapTable required
);
}
// set the atp index for values in the instantiated row.
for (i = 0; i < nullInstantiatedOutput().entries(); i++)
{
ValueId val_id = nullInstantiatedOutput()[i];
Attributes * attr = generator->getMapInfo(val_id)->getAttr();
attr->setAtp(0);
attr->setAtpIndex(returned_instantiated_row_atp_index);
// Do not do bulk move because null instantiate expression
// is not set in TDB to save execution time, see ComTdbMj below
attr->setBulkMoveable(FALSE);
}
// generate any expression to be applied after the join
if (! selectionPred().isEmpty())
{
ItemExpr * newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&post_join_expr);
}
bool isOverflowEnabled = (CmpCommon::getDefault(MJ_OVERFLOW) == DF_ON);
UInt16 scratchThresholdPct
= (UInt16) getDefault(SCRATCH_FREESPACE_THRESHOLD_PERCENT);
// Big Memory Operator (BMO) settings
// Use memory quota only if fragment has more than one BMO.
UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
double BMOsMemoryLimit = 0;
UInt16 quotaMB = 0;
if ( CmpCommon::getDefaultLong(MJ_BMO_QUOTA_PERCENT) != 0)
{
// Apply the quota system if either one of the following is true:
// 1. the memory limit feature is turned off and there is more than one BMO
// 2. the memory limit feature is turned on
NADefaults &defs = ActiveSchemaDB()->getDefaults();
NABoolean mlimitPerCPU = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0;
if ( mlimitPerCPU || numBMOsInFrag > 1 ||
(numBMOsInFrag == 1 && CmpCommon::getDefault(EXE_SINGLE_BMO_QUOTA) == DF_ON)) {
quotaMB = (UInt16)
computeMemoryQuota(generator->getEspLevel() == 0,
mlimitPerCPU,
generator->getBMOsMemoryLimitPerCPU().value(),
generator->getTotalNumBMOsPerCPU(),
generator->getTotalBMOsMemoryPerCPU().value(),
numBMOsInFrag,
generator->getFragmentDir()->getBMOsMemoryUsage()
);
Lng32 mjMemoryLowbound = defs.getAsLong(EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN);
if ( quotaMB < mjMemoryLowbound )
quotaMB = (UInt16)mjMemoryLowbound;
}
} else {
Lng32 memoryMB = getExeMemoryAvailable(generator->getEspLevel() == 0, 0);
quotaMB = (UInt16)( (numBMOsInFrag > 1) ? (memoryMB/numBMOsInFrag) : 0 ) ;
}
bool yieldQuota = !(generator->getRightSideOfFlow());
UInt16 quotaPct = (UInt16) getDefault(MJ_BMO_QUOTA_PERCENT);
#pragma nowarn(1506) // warning elimination
ComTdbMj * mj_tdb =
new(space) ComTdbMj(child_tdb1,
child_tdb2,
given_desc,
returned_desc,
(NOT doEncodedKeyCompOpt
? merge_expr : left_encoded_key_expr),
(NOT doEncodedKeyCompOpt
? comp_expr : right_encoded_key_expr),
left_check_dup_expr,
right_check_dup_expr,
lj_expr,
0,
right_copy_dup_expr,
right_row_len,
rowlen,
work_cri_desc,
instantiated_row_atp_index,
encoded_key_len,
encoded_key_atp_index,
pre_join_expr,
post_join_expr,
(queue_index)getDefault(GEN_MJ_SIZE_DOWN),
(queue_index)getDefault(GEN_MJ_SIZE_UP),
(Cardinality) getGroupAttr()->
getOutputLogPropList()[0]->getResultCardinality().value(),
getDefault(GEN_MJ_NUM_BUFFERS),
getDefault(GEN_MJ_BUFFER_SIZE),
is_semijoin,
is_leftjoin,
is_anti_semijoin,
isLeftUnique,
isRightUnique,
isOverflowEnabled,
scratchThresholdPct,
quotaMB,
quotaPct,
yieldQuota);
#pragma warn(1506) // warning elimination
generator->initTdbFields(mj_tdb);
if (CmpCommon::getDefault(EXE_DIAGNOSTIC_EVENTS) == DF_ON)
{
mj_tdb->setLogDiagnostics(true);
}
mj_tdb->setOverflowMode(generator->getOverflowMode());
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(mj_tdb, leftExplainTuple, rightExplainTuple, generator));
}
generator->setGenObj(this, mj_tdb);
// restore the original down cri desc since this node changed it.
generator->setCriDesc(given_desc, Generator::DOWN);
// set the new up cri desc.
generator->setCriDesc(returned_desc, Generator::UP);
// reset the expression generation flag to generate float validation pcode
generator->setGenNoFloatValidatePCode(FALSE);
// reset the handleIndirectVC flag to its initial value
exp_gen->setHandleIndirectVC( vcflag );
return 0;
} // MergeJoin::codeGen
short NestedJoin::codeGen(Generator * generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space = generator->getSpace();
// set flag to enable pcode for indirect varchar
NABoolean vcflag = exp_gen->handleIndirectVC();
if (CmpCommon::getDefault(VARCHAR_PCODE) == DF_ON) {
exp_gen->setHandleIndirectVC( TRUE );
}
ex_expr * after_expr = 0;
NABoolean is_semijoin = isSemiJoin();
NABoolean is_antisemijoin = isAntiSemiJoin();
NABoolean is_leftjoin = isLeftJoin();
NABoolean is_undojoin = isTSJForUndo();
NABoolean is_setnferror = isTSJForSetNFError();
////////////////////////////////////////////////////////////////////////////
//
// Layout of row returned by this node.
//
// |------------------------------------------------------------------------|
// | input data | left child's data | right child's data| instantiated row |
// | ( I tupps ) | ( L tupps ) | ( R tupps ) | ( 1 tupp ) |
// |------------------------------------------------------------------------|
//
// <-- returned row from left ------->
// <------------------ returned row from right ---------->
//
// input data: the atp input to this node by its parent.
// left child data: tupps appended by the left child
// right child data: tupps appended by right child
// instantiated row: For some left join cases, the
// null values are instantiated. See proc
// Join::instantiateValuesForLeftJoin for details at the end of
// this file.
//
// Returned row to parent contains:
//
// I + L tupps, if this is a semi join. Rows from right are not returned.
//
// If this is not a semi join, then:
// I + L + R tupps, if instantiation is not done.
// I + L + R + 1 tupps, if instantiation is done.
//
////////////////////////////////////////////////////////////////////////////
ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);
// It is OK for neither child to exist when generating a nested join TDB
// for index maintenance. The children are filled in at build time.
//
GenAssert((child(0) AND child(1)) OR (NOT child(0) AND NOT (child(1))),
"NestedJoin -- missing one child");
ComTdb * tdb1 = NULL;
ComTdb * tdb2 = NULL;
ExplainTuple *leftExplainTuple = NULL;
ExplainTuple *rightExplainTuple = NULL;
//++Triggers
// insert a temporary map table, so that we can later delete the children's map
// tables in case the nested join doesn't return values.
MapTable * beforeLeftMapTable = generator->appendAtEnd();
//--Triggers
// MV --
// We need to know if the right child is a VSBB Insert node.
NABoolean rightChildIsVsbbInsert = FALSE;
NABoolean leftChildIsVsbbInsert = FALSE;
GenAssert(!generator->getVSBBInsert(), "Not expecting VSBBInsert flag from parent.");
if(child(0) && child(1)) {
// generate code for left child tree
// - MVs
child(0)->codeGen(generator);
leftChildIsVsbbInsert = generator->getVSBBInsert();
generator->setVSBBInsert(FALSE);
tdb1 = (ComTdb *)(generator->getGenObj());
leftExplainTuple = generator->getExplainTuple();
// Override the queue sizes for the left child, if
// GEN_ONLJ_SET_QUEUE_LEFT is on.
if (generator->getMakeOnljLeftQueuesBig())
{
short queueResizeLimit = (short) getDefault(DYN_QUEUE_RESIZE_LIMIT);
short queueResizeFactor = (short) getDefault(DYN_QUEUE_RESIZE_FACTOR);
queue_index downSize = generator->getOnljLeftDownQueue();
queue_index upSize = generator->getOnljLeftUpQueue();
downSize = MAXOF(downSize, tdb1->getInitialQueueSizeDown());
upSize = MAXOF(upSize, tdb1->getInitialQueueSizeUp());
tdb1->setQueueResizeParams(downSize, upSize, queueResizeLimit, queueResizeFactor);
}
}
////////////////////////////////////////////////////////////
// Before generating any expression for this node, set the
// expression generation flag not to generate float
// validation PCode. This is to speed up PCode evaluation
////////////////////////////////////////////////////////////
generator->setGenNoFloatValidatePCode(TRUE);
child(0)->isRowsetIterator() ? setRowsetIterator(TRUE) : setRowsetIterator(FALSE);
NABoolean tolerateNonFatalError = FALSE;
if (child(0)->getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
{
tolerateNonFatalError = TRUE;
generator->setTolerateNonFatalError(TRUE);
generator->setTolerateNonFatalErrorInFlowRightChild(TRUE);
}
if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
{
tolerateNonFatalError = TRUE;
}
ex_expr * before_expr = 0;
// generate join expression, if present.
if (! joinPred().isEmpty())
{
ItemExpr * newPredTree
= joinPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&before_expr);
}
ex_cri_desc * left_child_desc = generator->getCriDesc(Generator::UP);
// if semi join, save the address of the last map table.
// This is used later to remove
// all map tables appended by the right child tree as the right child
// values are not visible above this node.
MapTable * save_map_table = 0;
if (is_semijoin || is_antisemijoin)
save_map_table = generator->getLastMapTable();
if(child(0) && child(1)) {
// give to the second child the returned descriptor from first child
generator->setCriDesc(left_child_desc, Generator::DOWN);
// reset the expression generation flag
generator->setGenNoFloatValidatePCode(FALSE);
// remember that we're code gen'ing the right side of a join.
NABoolean wasRightSideOfOnlj = generator->getRightSideOfOnlj();
NABoolean savComputeRowsetRowsAffected =
generator->computeRowsetRowsAffected();
if (getRowsetRowCountArraySize() > 0)
generator->setComputeRowsetRowsAffected(TRUE);
generator->setRightSideOfOnlj(TRUE);
// RHS of NestedJoin starts with LargeQueueSizes not in use (0).
// If a SplitTop is found, it may set the largeQueueSize to
// an appropriate value.
ULng32 largeQueueSize = generator->getLargeQueueSize();
generator->setLargeQueueSize(0);
// generate code for right child tree
child(1)->codeGen(generator);
// Above the NestedJoin, we restore the LargeQueueSize to what
// was in effect before.
generator->setLargeQueueSize(largeQueueSize);
generator->setRightSideOfOnlj(wasRightSideOfOnlj);
generator->setComputeRowsetRowsAffected(savComputeRowsetRowsAffected);
rightChildIsVsbbInsert = generator->getVSBBInsert();
// Because of the bushy tree optimizer rule, there is a chance that our
// left child is a VSBB Insert, so we need to pass the flag to our parent.
generator->setVSBBInsert(leftChildIsVsbbInsert);
tdb2 = (ComTdb *)(generator->getGenObj());
rightExplainTuple = generator->getExplainTuple();
}
// turn off the Right Child Only flag. Note we turn it off only after making sure
// that we are in the same NestedJoinFlow::codeGen method that turned it on in the
// first place.
if (tolerateNonFatalError)
generator->setTolerateNonFatalErrorInFlowRightChild(FALSE);
ex_cri_desc * right_child_desc = generator->getCriDesc(Generator::UP);
short returned_instantiated_row_atp_index = -1;
// set the expression generation flag not to generate float
// validation PCode again, as it might be reset above
generator->setGenNoFloatValidatePCode(TRUE);
// only the left child's rows are returned for semi join.
unsigned short returned_tuples = left_child_desc->noTuples();
if (! is_semijoin) {
returned_tuples = right_child_desc->noTuples();
if (nullInstantiatedOutput().entries() > 0)
returned_instantiated_row_atp_index = (short) returned_tuples++;
}
ex_cri_desc * returned_desc = new(space) ex_cri_desc(returned_tuples, space);
ValueIdSet afterPredicates;
if ( is_semijoin || is_antisemijoin || is_leftjoin )
{
afterPredicates = selectionPred();
}
else
{
GenAssert(joinPred().isEmpty(),"NOT joinPred().isEmpty()");
// Since this is a form of TSJ, selectionPred() should also be empty.
if (getGroupAttr()->isGenericUpdateRoot() AND (NOT selectionPred().isEmpty()))
afterPredicates = selectionPred();
else
GenAssert(selectionPred().isEmpty(),"NOT selectionPred().isEmpty()");
}
ex_expr * lj_expr = 0;
ex_expr * ni_expr = 0;
ULng32 rowlen = 0;
if (nullInstantiatedOutput().entries() > 0)
{
instantiateValuesForLeftJoin(generator,
0, returned_instantiated_row_atp_index,
&lj_expr, &ni_expr,
&rowlen,
NULL // No MapTable required
);
Attributes *attr = 0;
ItemExpr *itemExpr = 0;
for (CollIndex i = 0; i < nullInstantiatedOutput().entries(); i++)
{
itemExpr = nullInstantiatedOutput()[i].getItemExpr();
attr = (generator->getMapInfo( itemExpr->getValueId() ))->getAttr();
// Do not do bulk move because null instantiate expression
// is not set in TDB to save execution time, see ComTdbOnlj below
attr->setBulkMoveable(FALSE);
}
}
// right child's values are not returned for semi join. Remove them.
if (is_semijoin || is_antisemijoin)
generator->removeAll(save_map_table);
// generate after join expression, if present.
if (! afterPredicates.isEmpty())
{
ItemExpr * newPredTree = afterPredicates.rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&after_expr);
}
//++Triggers
// no characteristic outputs; remove values that were generated by the children
if (!(getGroupAttr()->getCharacteristicOutputs().entries()))
generator->removeAll(beforeLeftMapTable);
//--Triggers
// get the default value for the buffer size
ULng32 bufferSize = (ULng32) getDefault(GEN_ONLJ_BUFFER_SIZE);
// adjust the default and compute the size of a buffer that can
// accommodate five rows. The number five is an arbitrary number.
// Too low a number means that at execution time row processing might
// be blocked waiting for an empty buffer and too large a number might imply
// waste of memory space
if (rowlen)
{
bufferSize = MAXOF(bufferSize,
SqlBufferNeededSize(5, (Lng32)rowlen, SqlBuffer::NORMAL_));
}
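// For example (illustrative numbers only): with rowlen = 200 bytes, the
// buffer must hold at least 5 * 200 = 1000 bytes of row data, plus the
// per-row and per-buffer overhead that SqlBufferNeededSize accounts for.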
// is this join used to drive mv logging
RelExpr *MvLogExpr = this;
while ((MvLogExpr->child(0)->castToRelExpr()->getOperator() == REL_NESTED_JOIN) ||
(MvLogExpr->child(0)->castToRelExpr()->getOperator() == REL_LEFT_NESTED_JOIN))
MvLogExpr = MvLogExpr->child(0)->castToRelExpr();
while ((MvLogExpr->child(1)->castToRelExpr()->getOperator() == REL_NESTED_JOIN) ||
(MvLogExpr->child(1)->castToRelExpr()->getOperator() == REL_LEFT_NESTED_JOIN) ||
(MvLogExpr->child(1)->castToRelExpr()->getOperator() == REL_NESTED_JOIN_FLOW))
MvLogExpr = MvLogExpr->child(1)->castToRelExpr();
RelExpr *rightChildExpr = MvLogExpr->child(1)->castToRelExpr();
OperatorTypeEnum rightChildOp = rightChildExpr->getOperatorType();
NABoolean usedForMvLogging = FALSE;
#pragma nowarn(1506) // warning elimination
ComTdbOnlj * nlj_tdb =
new(space) ComTdbOnlj(tdb1,
tdb2,
given_desc,
returned_desc,
(queue_index)getDefault(GEN_ONLJ_SIZE_DOWN),
(queue_index)getDefault(GEN_ONLJ_SIZE_UP),
(Cardinality) getGroupAttr()->
getOutputLogPropList()[0]->
getResultCardinality().value(),
getDefault(GEN_ONLJ_NUM_BUFFERS),
bufferSize,
before_expr,
after_expr,
lj_expr, 0,
0,
0,
rowlen,
is_semijoin,
is_antisemijoin,
is_leftjoin,
is_undojoin,
is_setnferror,
isRowsetIterator(),
isIndexJoin(),
rightChildIsVsbbInsert,
getRowsetRowCountArraySize(),
tolerateNonFatalError,
usedForMvLogging
);
#pragma warn(1506) // warning elimination
// getRowsetRowCountArraySize() should return positive values
// only if isRowsetIterator() returns TRUE.
GenAssert((((getRowsetRowCountArraySize() > 0) && isRowsetIterator()) ||
(getRowsetRowCountArraySize() == 0)),
"Incorrect value returned by getRowsetRowCountArray()");
generator->initTdbFields(nlj_tdb);
// Make sure that the LHS up queue can grow as large as the RHS down
// queue.
if(tdb1->getMaxQueueSizeUp() < tdb2->getInitialQueueSizeDown()) {
tdb1->setMaxQueueSizeUp(tdb2->getInitialQueueSizeDown());
}
// If this NestedJoin itself is not on the RHS of a Flow/NestedJoin,
// then reset the largeQueueSize to 0.
if(NOT generator->getRightSideOfFlow()) {
generator->setLargeQueueSize(0);
}
// If it does not have two children, this is index maintenance code and
// should not be Explained
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(nlj_tdb, leftExplainTuple, rightExplainTuple, generator));
}
// restore the original down cri desc since this node changed it.
generator->setCriDesc(given_desc, Generator::DOWN);
// set the new up cri desc.
generator->setCriDesc(returned_desc, Generator::UP);
generator->setGenObj(this, nlj_tdb);
// reset the expression generation flag to generate float validation pcode
generator->setGenNoFloatValidatePCode(FALSE);
// reset the handleIndirectVC flag to its initial value
exp_gen->setHandleIndirectVC( vcflag );
return 0;
}
short NestedJoinFlow::codeGen(Generator * generator)
{
CostScalar numberOfInputRows = getInputCardinality();
if ((numberOfInputRows > 1) &&
(child(1)) &&
((child(1)->getOperatorType() == REL_HBASE_DELETE) ||
(child(1)->getOperatorType() == REL_HBASE_UPDATE)) &&
(CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON) &&
(CmpCommon::getDefault(HBASE_UPDEL_CURSOR_OPT) == DF_ON))
{
setOperatorType(REL_NESTED_JOIN);
return NestedJoin::codeGen(generator);
}
ExpGenerator * exp_gen = generator->getExpGenerator();
MapTable * map_table = generator->getMapTable();
Space * space = generator->getSpace();
////////////////////////////////////////////////////////////////////////////
//
// Layout of row returned by this node.
//
// |---------------------------------|
// | input data | left child's data |
// | ( I tupps ) | ( L tupps ) |
// |---------------------------------|
//
// <-- returned row from left ------->
// <- returned row from right ------->
//
// input data: the atp input to this node by its parent.
// left child data: tupps appended by the left child
//
// Returned row to parent contains:
//
// I + L tupps, since this operator doesn't produce any output.
//
////////////////////////////////////////////////////////////////////////////
ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);
ComTdb * tdb1 = NULL;
ComTdb * tdb2 = NULL;
ExplainTuple *leftExplainTuple = NULL;
ExplainTuple *rightExplainTuple = NULL;
NABoolean tolerateNonFatalError = FALSE;
if(child(0) && child(1)) {
// generate code for left child tree
child(0)->codeGen(generator);
tdb1 = (ComTdb *)(generator->getGenObj());
leftExplainTuple = generator->getExplainTuple();
if (child(0)->isRowsetIterator())
{
setRowsetIterator(TRUE);
if (child(0)->getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
{
tolerateNonFatalError = TRUE;
generator->setTolerateNonFatalError(TRUE);
generator->setTolerateNonFatalErrorInFlowRightChild(TRUE);
}
}
generator->setTupleFlowLeftChildAttrs(child(0)->getGroupAttr());
}
ex_cri_desc * left_child_desc = generator->getCriDesc(Generator::UP);
if(child(0) && child(1)) {
// give to the second child the returned descriptor from first child
generator->setCriDesc(left_child_desc, Generator::DOWN);
// remember that we're code gen'ing the right side of a tuple flow.
NABoolean wasRightSideOfFlow = generator->getRightSideOfTupleFlow();
generator->setRightSideOfTupleFlow(TRUE);
// RHS of Flow starts with LargeQueueSizes not in use (0).
// If a SplitTop is found, it may set the largeQueueSize to
// an appropriate value.
ULng32 largeQueueSize = generator->getLargeQueueSize();
generator->setLargeQueueSize(0);
// generate code for right child tree
child(1)->codeGen(generator);
// Above the Flow, we restore the LargeQueueSize to what
// was in effect before.
generator->setLargeQueueSize(largeQueueSize);
generator->setRightSideOfTupleFlow(wasRightSideOfFlow);
tdb2 = (ComTdb *)(generator->getGenObj());
rightExplainTuple = generator->getExplainTuple();
}
// Turn off the Right Child Only flag. Note we turn it off only after making sure
// that we are in the same NestedJoinFlow::codeGen method that turned it on in the
// first place.
if (tolerateNonFatalError)
generator->setTolerateNonFatalErrorInFlowRightChild(FALSE);
ex_cri_desc * right_child_desc = generator->getCriDesc(Generator::UP);
// Determine the number of tuples returned to the parent.
unsigned short returned_tuples = 0;
#ifdef _DEBUG
if (getenv("RI_DEBUG"))
returned_tuples = right_child_desc->noTuples();
else
returned_tuples = left_child_desc->noTuples();
#else
returned_tuples = left_child_desc->noTuples();
#endif
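// NOTE: the assignment below unconditionally overwrites the value chosen
// by the #ifdef block above; the right child's tuple count is always used.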
returned_tuples = right_child_desc->noTuples();
ex_cri_desc * returned_desc = new(space) ex_cri_desc(returned_tuples, space);
ComTdbTupleFlow * tflow_tdb =
new(space) ComTdbTupleFlow(tdb1,
tdb2,
given_desc,
returned_desc,
0, // no tgt Expr yet
0, // no work cri desc yet
(queue_index)getDefault(GEN_TFLO_SIZE_DOWN),
(queue_index)getDefault(GEN_TFLO_SIZE_UP),
(Cardinality) getGroupAttr()->
getOutputLogPropList()[0]->
getResultCardinality().value(),
#pragma nowarn(1506) // warning elimination
getDefault(GEN_TFLO_NUM_BUFFERS),
getDefault(GEN_TFLO_BUFFER_SIZE),
generator->getVSBBInsert(),
isRowsetIterator(),
tolerateNonFatalError);
#pragma warn(1506) // warning elimination
generator->initTdbFields(tflow_tdb);
// turn off the VSBB insert flag in the generator, it has been
// processed and we don't want other nodes to use it by mistake
generator->setVSBBInsert(FALSE);
tflow_tdb->setUserSidetreeInsert(generator->getUserSidetreeInsert());
// If this Flow itself is not on the RHS of a Flow/NestedJoin,
// then reset the largeQueueSize to 0.
if(NOT generator->getRightSideOfFlow()) {
generator->setLargeQueueSize(0);
}
tflow_tdb->setSendEODtoTgt(sendEODtoTgt_);
if(!generator->explainDisabled())
generator->setExplainTuple(addExplainInfo(
tflow_tdb, leftExplainTuple, rightExplainTuple, generator));
// restore the original down cri desc since this node changed it.
generator->setCriDesc(given_desc, Generator::DOWN);
// set the new up cri desc.
generator->setCriDesc(returned_desc, Generator::UP);
generator->setGenObj(this, tflow_tdb);
return 0;
}
short Join::instantiateValuesForLeftJoin(Generator * generator,
short atp, short atp_index,
ex_expr ** lj_expr,
ex_expr ** ni_expr,
ULng32 * rowlen,
MapTable ** newMapTable,
ExpTupleDesc::TupleDataFormat tdf)
{
//////////////////////////////////////////////////////////////////////////////
// Special handling for left joins:
// A null instantiated row is represented as a missing entry at the
// atp_index of the row(s) coming up from the right child. Any use of
// a value from this (missing) row is treated as a null value.
// This works well for the case when the output of the right child
// preserves null. That is, the output becomes null if its operand
// from the right side is null.
//
// Nulls are not preserved in two cases:
//
// 1) If output of the right child depends upon its input.
// For example:
// select * from t1 left join (select 10 from t2) on ...
// In this example, the constant 10 is needed to evaluate the output coming
// in from the right child, but the constant 10 is an input to the right
// child and has space allocated at atp_index = 0. So even if the row from
// the right is 'missing', the output value will be 10.
//
// 2) If output of the right is involved in certain expressions which
// do not return null if their operand is null.
// For example:
// select * from t1 left join (select case when a is null then 10 end from t2)
// In this case, the output of right will become 10 if the column 'a' from
// right table t2 is missing. But that is not correct. The correct value
// is a null value for the whole expression, if left join doesn't find
// a match.
//
// To handle these cases, the rows from the right are instantiated before
// returning back from the Join node.
// Two expressions are generated to do this. One, for the case when a match
// is found. The right expression is evaluated and its result moved to a separate
// tupp. Two, for the case when a match is not found. Then, a null value is
// moved to the location of the expression result.
//
//////////////////////////////////////////////////////////////////////////////
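// A minimal sketch of the two expressions described above (purely
// illustrative; the real expressions are built by the
// generateContiguousMoveExpr calls below):
//   lj_expr: on a match, evaluate the right child's output values and
//            move the results into the instantiated-row tupp at
//            (atp, atp_index).
//   ni_expr: on a missed match, move typed NULL constants into those
//            same result locations, so the outputs evaluate to NULL.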
ExpGenerator * exp_gen = generator->getExpGenerator();
MapTable * map_table = generator->getMapTable();
Space * space = generator->getSpace();
ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
if (tdf != ExpTupleDesc::UNINITIALIZED_FORMAT)
{
tupleFormat = tdf;
}
exp_gen->generateContiguousMoveExpr(nullInstantiatedOutput(),
0, // don't add convert nodes
atp,
atp_index,
tupleFormat,
*rowlen,
lj_expr,
0, // no need for ExpTupleDesc * tupleDesc
ExpTupleDesc::SHORT_FORMAT,
newMapTable);
// generate expression to move null values to instantiate buffer.
ValueIdSet null_val_id_set;
for (CollIndex i = 0; i < nullInstantiatedOutput().entries(); i++)
{
ValueId val_id = nullInstantiatedOutput()[i];
ConstValue * const_value = exp_gen->generateNullConst(val_id.getType());
ItemExpr * ie = new(generator->wHeap())
Cast(const_value, &(val_id.getType()));
ie->bindNode(generator->getBindWA());
generator->addMapInfo(ie->getValueId(),
generator->getMapInfo(val_id)->getAttr());
null_val_id_set.insert(ie->getValueId());
}
ULng32 rowlen2=0;
exp_gen->generateContiguousMoveExpr(null_val_id_set,
0, // don't add convert nodes
atp, atp_index,
tupleFormat,
rowlen2,
ni_expr,
0, // no need for ExpTupleDesc * tupleDesc
ExpTupleDesc::SHORT_FORMAT);
GenAssert(rowlen2 == *rowlen, "Unexpected row length from expression");
return 0;
}
short Join::instantiateValuesForRightJoin(Generator * generator,
short atp, short atp_index,
ex_expr ** rj_expr,
ex_expr ** ni_expr,
ULng32 * rowlen,
MapTable ** newMapTable,
ExpTupleDesc::TupleDataFormat tdf)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
MapTable * map_table = generator->getMapTable();
Space * space = generator->getSpace();
// Don't need a MapTable back. At this point, we have generated all
// the necessary expressions. This code is here to be consistent with
// this function's counterpart, instantiateValuesForLeftJoin.
GenAssert((newMapTable == NULL), "Don't need a Maptable back");
ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
if (tdf != ExpTupleDesc::UNINITIALIZED_FORMAT)
{
tupleFormat = tdf;
}
exp_gen->generateContiguousMoveExpr(nullInstantiatedForRightJoinOutput(),
0, // don't add convert nodes
atp, atp_index,
tupleFormat,
*rowlen,
rj_expr,
0, // no need for ExpTupleDesc * tupleDesc
ExpTupleDesc::SHORT_FORMAT,
newMapTable);
// generate expression to move null values to instantiate buffer.
ValueIdSet null_val_id_set;
for (CollIndex i = 0; i < nullInstantiatedForRightJoinOutput().entries(); i++)
{
ValueId val_id = nullInstantiatedForRightJoinOutput()[i];
ConstValue * const_value = exp_gen->generateNullConst(val_id.getType());
ItemExpr * ie = new(generator->wHeap())
Cast(const_value, &(val_id.getType()));
ie->bindNode(generator->getBindWA());
generator->addMapInfo(ie->getValueId(),
generator->getMapInfo(val_id)->getAttr());
null_val_id_set.insert(ie->getValueId());
}
ULng32 rowlen2=0;
exp_gen->generateContiguousMoveExpr(null_val_id_set,
0, // don't add convert nodes
atp, atp_index,
tupleFormat,
rowlen2,
ni_expr,
0, // no need for ExpTupleDesc * tupleDesc
ExpTupleDesc::SHORT_FORMAT);
GenAssert(rowlen2 == *rowlen, "Unexpected row length from expression");
return 0;
}
| 1 | 17,952 | I take it memQuotaRatio is an "out" parameter at line 1810? Also, why use MIN_QUOTA in both cases, when memQuota is too low and when it is too high? Contrast this code with RelRoot::codeGen, where we set MIN_QUOTA only in the too-low case. | apache-trafodion | cpp
@@ -49,7 +49,6 @@ func NewPullSubscription(name, namespace string, so ...PullSubscriptionOption) *
for _, opt := range so {
opt(s)
}
- s.SetDefaults(gcpauthtesthelper.ContextWithDefaults())
return s
}
| 1 | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"time"
gcpauthtesthelper "github.com/google/knative-gcp/pkg/apis/configs/gcpauth/testhelper"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
"github.com/google/knative-gcp/pkg/apis/intevents/v1beta1"
)
// PullSubscriptionOption enables further configuration of a PullSubscription.
type PullSubscriptionOption func(*v1beta1.PullSubscription)
const (
SubscriptionID = "subID"
)
// NewPullSubscription creates a PullSubscription with PullSubscriptionOptions
func NewPullSubscription(name, namespace string, so ...PullSubscriptionOption) *v1beta1.PullSubscription {
s := &v1beta1.PullSubscription{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
for _, opt := range so {
opt(s)
}
s.SetDefaults(gcpauthtesthelper.ContextWithDefaults())
return s
}
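// Illustrative usage of the option pattern above (a sketch only; the
// GroupVersionKind values are placeholders, not taken from this package):
//
//	ps := NewPullSubscription("source", "testnamespace",
//		WithPullSubscriptionSink(metav1.GroupVersionKind{
//			Group: "testing.cloud.google.com", Version: "v1beta1", Kind: "Sink",
//		}, "sink"),
//		WithPullSubscriptionMarkSubscribed(SubscriptionID))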
// NewPullSubscriptionWithNoDefaults creates a PullSubscription with
// PullSubscriptionOptions but does not set defaults.
func NewPullSubscriptionWithNoDefaults(name, namespace string, so ...PullSubscriptionOption) *v1beta1.PullSubscription {
s := &v1beta1.PullSubscription{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
for _, opt := range so {
opt(s)
}
return s
}
// NewPullSubscriptionWithoutNamespace creates a PullSubscription with PullSubscriptionOptions but without a specific namespace
func NewPullSubscriptionWithoutNamespace(name string, so ...PullSubscriptionOption) *v1beta1.PullSubscription {
s := &v1beta1.PullSubscription{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
for _, opt := range so {
opt(s)
}
s.SetDefaults(gcpauthtesthelper.ContextWithDefaults())
return s
}
func WithPullSubscriptionUID(uid types.UID) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.UID = uid
}
}
func WithPullSubscriptionGenerateName(generateName string) PullSubscriptionOption {
return func(c *v1beta1.PullSubscription) {
c.ObjectMeta.GenerateName = generateName
}
}
// WithInitPullSubscriptionConditions initializes the PullSubscriptions's conditions.
func WithInitPullSubscriptionConditions(s *v1beta1.PullSubscription) {
s.Status.InitializeConditions()
}
func WithPullSubscriptionSink(gvk metav1.GroupVersionKind, name string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec.Sink = duckv1.Destination{
Ref: &duckv1.KReference{
APIVersion: apiVersion(gvk),
Kind: gvk.Kind,
Name: name,
},
}
}
}
func WithPullSubscriptionTransformer(gvk metav1.GroupVersionKind, name string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec.Transformer = &duckv1.Destination{
Ref: &duckv1.KReference{
APIVersion: apiVersion(gvk),
Kind: gvk.Kind,
Name: name,
},
}
}
}
func WithPullSubscriptionMarkSink(uri *apis.URL) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkSink(uri)
}
}
func WithPullSubscriptionMarkTransformer(uri *apis.URL) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkTransformer(uri)
}
}
func WithPullSubscriptionMarkNoTransformer(reason, message string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkNoTransformer(reason, message)
}
}
func WithPullSubscriptionMarkSubscribed(subscriptionID string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkSubscribed(subscriptionID)
}
}
func WithPullSubscriptionSubscriptionID(subscriptionID string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.SubscriptionID = subscriptionID
}
}
func WithPullSubscriptionProjectID(projectID string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.ProjectID = projectID
}
}
func WithPullSubscriptionTransformerURI(uri *apis.URL) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.TransformerURI = uri
}
}
func WithPullSubscriptionMarkNoSubscription(reason, message string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkNoSubscription(reason, message)
}
}
func WithPullSubscriptionMarkDeployed(name, namespace string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.PropagateDeploymentAvailability(NewDeployment(name, namespace, WithDeploymentAvailable()))
}
}
func WithPullSubscriptionMarkNoDeployed(name, namespace string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.PropagateDeploymentAvailability(NewDeployment(name, namespace))
}
}
func WithPullSubscriptionSpec(spec v1beta1.PullSubscriptionSpec) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec = spec
s.Spec.SetDefaults(gcpauthtesthelper.ContextWithDefaults())
}
}
// WithPullSubscriptionSpecWithNoDefaults is the same as WithPullSubscriptionSpec but does not set defaults.
func WithPullSubscriptionSpecWithNoDefaults(spec v1beta1.PullSubscriptionSpec) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec = spec
}
}
func WithPullSubscriptionReady(sink *apis.URL) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.InitializeConditions()
s.Status.MarkSink(sink)
s.Status.PropagateDeploymentAvailability(NewDeployment("any", "any", WithDeploymentAvailable()))
s.Status.MarkSubscribed(SubscriptionID)
}
}
func WithPullSubscriptionFailed() PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.InitializeConditions()
s.Status.MarkNoSink("InvalidSink",
`failed to get ref &ObjectReference{Kind:Sink,Namespace:testnamespace,Name:sink,UID:,APIVersion:testing.cloud.google.com/v1beta1,ResourceVersion:,FieldPath:,}: sinks.testing.cloud.google.com "sink" not found`)
}
}
func WithPullSubscriptionUnknown() PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.InitializeConditions()
}
}
func WithPullSubscriptionJobFailure(subscriptionID, reason, message string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.SubscriptionID = subscriptionID
s.Status.MarkNoSubscription(reason, message)
}
}
func WithPullSubscriptionSinkNotFound() PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.MarkNoSink("InvalidSink",
`failed to get ref &ObjectReference{Kind:Sink,Namespace:testnamespace,Name:sink,UID:,APIVersion:testing.cloud.google.com/v1beta1,ResourceVersion:,FieldPath:,}: sinks.testing.cloud.google.com "sink" not found`)
}
}
func WithPullSubscriptionDeleted(s *v1beta1.PullSubscription) {
t := metav1.NewTime(time.Unix(1e9, 0))
s.ObjectMeta.SetDeletionTimestamp(&t)
}
func WithPullSubscriptionOwnerReferences(ownerReferences []metav1.OwnerReference) PullSubscriptionOption {
return func(c *v1beta1.PullSubscription) {
c.ObjectMeta.OwnerReferences = ownerReferences
}
}
func WithPullSubscriptionLabels(labels map[string]string) PullSubscriptionOption {
return func(c *v1beta1.PullSubscription) {
c.ObjectMeta.Labels = labels
}
}
func WithPullSubscriptionAnnotations(annotations map[string]string) PullSubscriptionOption {
return func(c *v1beta1.PullSubscription) {
c.ObjectMeta.Annotations = annotations
}
}
func WithPullSubscriptionFinalizers(finalizers ...string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Finalizers = finalizers
}
}
func WithPullSubscriptionStatusObservedGeneration(generation int64) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.Status.ObservedGeneration = generation
}
}
func WithPullSubscriptionObjectMetaGeneration(generation int64) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.ObjectMeta.Generation = generation
}
}
func WithPullSubscriptionReadyStatus(status corev1.ConditionStatus, reason, message string) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Status.Conditions = []apis.Condition{{
Type: apis.ConditionReady,
Status: status,
Reason: reason,
Message: message,
}}
}
}
func WithPullSubscriptionMode(mode v1beta1.ModeType) PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec.Mode = mode
}
}
func WithPullSubscriptionDefaultGCPAuth() PullSubscriptionOption {
return func(s *v1beta1.PullSubscription) {
s.Spec.PubSubSpec.SetPubSubDefaults(gcpauthtesthelper.ContextWithDefaults())
}
}
| 1 | 15,586 | It looks like for NewPullSubscription, we have two functions: one for a PullSubscription with defaults, one without. If you remove SetDefaults here, then there is no difference between these two functions. | google-knative-gcp | go
@@ -186,6 +186,13 @@ type AssetParams struct {
// created.
Total uint64 `codec:"t"`
+ // Decimals specifies the number of digits to display after the decimal
+ // place when displaying this asset. A value of 0 represents an asset
+ // that is not divisible, a value of 1 represents an asset divisible
+ // into tenths, and so on. This value must be between 0 and 19
+ // (inclusive).
+ Decimals uint32 `codec:"dc"`
+
// DefaultFrozen specifies whether slots for this asset
// in user accounts are frozen by default or not.
DefaultFrozen bool `codec:"df"` | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package basics
import (
"reflect"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
// Status is the delegation status of an account's MicroAlgos
type Status byte
const (
// Offline indicates that the associated account is delegated.
Offline Status = iota
// Online indicates that the associated account used as part of the delegation pool.
Online
// NotParticipating indicates that the associated account is neither a delegator nor a delegate. Currently it is reserved for the incentive pool.
NotParticipating
)
func (s Status) String() string {
switch s {
case Offline:
return "Offline"
case Online:
return "Online"
case NotParticipating:
return "Not Participating"
}
return ""
}
// AccountData contains the data associated with a given address.
//
// This includes the account balance, delegation keys, delegation status, and a custom note.
type AccountData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Status Status `codec:"onl"`
MicroAlgos MicroAlgos `codec:"algo"`
// RewardsBase is used to implement rewards.
// This is not meaningful for accounts with Status=NotParticipating.
//
// Every block assigns some amount of rewards (algos) to every
// participating account. The amount is the product of how much
// block.RewardsLevel increased from the previous block and
// how many whole config.Protocol.RewardUnit algos this
// account holds.
//
// For performance reasons, we do not want to walk over every
// account to apply these rewards to AccountData.MicroAlgos. Instead,
// we defer applying the rewards until some other transaction
// touches that participating account, and at that point, apply all
// of the rewards to the account's AccountData.MicroAlgos.
//
// For correctness, we need to be able to determine how many
// total algos are present in the system, including deferred
// rewards (deferred in the sense that they have not been
// reflected in the account's AccountData.MicroAlgos, as described
// above). To compute this total efficiently, we avoid
// compounding rewards (i.e., no rewards on rewards) until
// they are applied to AccountData.MicroAlgos.
//
// Mechanically, RewardsBase stores the block.RewardsLevel
// whose rewards are already reflected in AccountData.MicroAlgos.
// If the account is Status=Offline or Status=Online, its
// effective balance (if a transaction were to be issued
// against this account) may be higher, as computed by
// AccountData.Money(). That function calls
// AccountData.WithUpdatedRewards() to apply the deferred
// rewards to AccountData.MicroAlgos.
RewardsBase uint64 `codec:"ebase"`
// RewardedMicroAlgos is used to track how many algos were given
// to this account since the account was first created.
//
// This field is updated along with RewardsBase; note that
// it won't answer the question "how many algos did I make in
// the past week".
RewardedMicroAlgos MicroAlgos `codec:"ern"`
VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
SelectionID crypto.VRFVerifier `codec:"sel"`
VoteFirstValid Round `codec:"voteFst"`
VoteLastValid Round `codec:"voteLst"`
VoteKeyDilution uint64 `codec:"voteKD"`
// If this account created an asset, AssetParams stores
// the parameters defining that asset. The params are indexed
// by the Index of the AssetID; the Creator is this account's address.
//
// An account with any asset in AssetParams cannot be
// closed, until the asset is destroyed. An asset can
// be destroyed if this account holds AssetParams.Total units
// of that asset (in the Assets array below).
//
// NOTE: do not modify this value in-place in existing AccountData
// structs; allocate a copy and modify that instead. AccountData
// is expected to have copy-by-value semantics.
AssetParams map[AssetIndex]AssetParams `codec:"apar"`
// Assets is the set of assets that can be held by this
// account. Assets (i.e., slots in this map) are explicitly
// added and removed from an account by special transactions.
// The map is keyed by the AssetID, which is the address of
// the account that created the asset plus a unique counter
// to distinguish re-created assets.
//
// Each asset bumps the required MinBalance in this account.
//
// An account that creates an asset must have its own asset
// in the Assets map until that asset is destroyed.
//
// NOTE: do not modify this value in-place in existing AccountData
// structs; allocate a copy and modify that instead. AccountData
// is expected to have copy-by-value semantics.
Assets map[AssetIndex]AssetHolding `codec:"asset"`
}
// AccountDetail encapsulates meaningful details about a given account, for external consumption
type AccountDetail struct {
Address Address
Algos MicroAlgos
Status Status
}
// SupplyDetail encapsulates meaningful details about the ledger's current token supply
type SupplyDetail struct {
Round Round
TotalMoney MicroAlgos
OnlineMoney MicroAlgos
}
// BalanceDetail encapsulates meaningful details about the current balances of the ledger, for external consumption
type BalanceDetail struct {
Round Round
TotalMoney MicroAlgos
OnlineMoney MicroAlgos
Accounts []AccountDetail
}
// AssetIndex is the unique integer index of an asset that can be used to look
// up the creator of the asset, whose balance record contains the AssetParams
type AssetIndex uint64
// AssetLocator stores both the asset creator, whose balance record contains
// the asset parameters, and the asset index, which is the key into those
// parameters
type AssetLocator struct {
Creator Address
Index AssetIndex
}
// AssetHolding describes an asset held by an account.
type AssetHolding struct {
Amount uint64 `codec:"a"`
Frozen bool `codec:"f"`
}
// AssetParams describes the parameters of an asset.
type AssetParams struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// Total specifies the total number of units of this asset
// created.
Total uint64 `codec:"t"`
// DefaultFrozen specifies whether slots for this asset
// in user accounts are frozen by default or not.
DefaultFrozen bool `codec:"df"`
// UnitName specifies a hint for the name of a unit of
// this asset.
UnitName string `codec:"un"`
// AssetName specifies a hint for the name of the asset.
AssetName string `codec:"an"`
// URL specifies a URL where more information about the asset can be
// retrieved
URL string `codec:"au"`
// MetadataHash specifies a commitment to some unspecified asset
// metadata. The format of this metadata is up to the application.
MetadataHash [32]byte `codec:"am"`
// Manager specifies an account that is allowed to change the
// non-zero addresses in this AssetParams.
Manager Address `codec:"m"`
// Reserve specifies an account whose holdings of this asset
// should be reported as "not minted".
Reserve Address `codec:"r"`
// Freeze specifies an account that is allowed to change the
// frozen state of holdings of this asset.
Freeze Address `codec:"f"`
// Clawback specifies an account that is allowed to take units
// of this asset from any account.
Clawback Address `codec:"c"`
}
// MakeAccountData returns an AccountData initialized with the given status and balance.
func MakeAccountData(status Status, algos MicroAlgos) AccountData {
return AccountData{Status: status, MicroAlgos: algos}
}
// Money returns the account's MicroAlgos balance with pending rewards applied, along with the total rewarded MicroAlgos.
func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money MicroAlgos, rewards MicroAlgos) {
e := u.WithUpdatedRewards(proto, rewardsLevel)
return e.MicroAlgos, e.RewardedMicroAlgos
}
// WithUpdatedRewards returns an updated number of algos in an AccountData
// to reflect rewards up to some rewards level.
func (u AccountData) WithUpdatedRewards(proto config.ConsensusParams, rewardsLevel uint64) AccountData {
if u.Status != NotParticipating {
var ot OverflowTracker
rewardsUnits := u.MicroAlgos.RewardUnits(proto)
rewardsDelta := ot.Sub(rewardsLevel, u.RewardsBase)
rewards := MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)}
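// Worked example (illustrative numbers only; assumes RewardUnits is the
// balance divided by proto.RewardUnit with integer division): with
// RewardUnit = 1,000,000, a balance of 2,500,000 microAlgos yields
// rewardsUnits = 2; if rewardsLevel - RewardsBase = 10, the account is
// credited rewards = 2 * 10 = 20 microAlgos below.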
u.MicroAlgos = ot.AddA(u.MicroAlgos, rewards)
if ot.Overflowed {
logging.Base().Panicf("AccountData.WithUpdatedRewards(): overflowed account balance when applying rewards %v + %d*(%d-%d)", u.MicroAlgos, rewardsUnits, rewardsLevel, u.RewardsBase)
}
u.RewardsBase = rewardsLevel
// The total reward over the lifetime of the account could exceed a 64-bit value. As a result
// this rewardAlgos counter could potentially roll over.
u.RewardedMicroAlgos = MicroAlgos{Raw: (u.RewardedMicroAlgos.Raw + rewards.Raw)}
}
return u
}
// VotingStake returns the amount of MicroAlgos associated with the user's account
// for the purpose of participating in the Algorand protocol. It assumes the
// caller has already updated rewards appropriately using WithUpdatedRewards().
func (u AccountData) VotingStake() MicroAlgos {
if u.Status != Online {
return MicroAlgos{Raw: 0}
}
return u.MicroAlgos
}
// KeyDilution returns the key dilution for this account,
// returning the default key dilution if not explicitly specified.
func (u AccountData) KeyDilution(proto config.ConsensusParams) uint64 {
if u.VoteKeyDilution != 0 {
return u.VoteKeyDilution
}
return proto.DefaultKeyDilution
}
// IsZero checks if an AccountData value is the same as its zero value.
func (u AccountData) IsZero() bool {
if u.Assets != nil && len(u.Assets) == 0 {
u.Assets = nil
}
return reflect.DeepEqual(u, AccountData{})
}
// BalanceRecord pairs an account's address with its associated data.
type BalanceRecord struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Addr Address `codec:"addr"`
AccountData
}
// ToBeHashed implements the crypto.Hashable interface
func (u BalanceRecord) ToBeHashed() (protocol.HashID, []byte) {
return protocol.BalanceRecord, protocol.Encode(u)
}
| 1 | 36,984 | not dramatic, but why 32 bits if we allow a range that is smaller than 8? | algorand-go-algorand | go
@@ -0,0 +1,8 @@
+package dto
+
+type ClientPromise struct {
+ SerialNumber int
+ IssuerId int
+ BenefiterId int
+ Amount int
+} | 1 | 1 | 9,449 | - Use type from `service_discovery/dto/price.go` - Rename `Price` -> `Money` | mysteriumnetwork-node | go |
@@ -193,6 +193,18 @@ public abstract class Either<L, R> implements io.vavr.Iterable<R>, io.vavr.Value
/**
* Maps either the left or the right side of this disjunction.
*
+ * <pre>{@code
+ *Either<?, AtomicInteger> success = Either.right(new AtomicInteger(42));
+ *
+ * //prints "Right(42)"
+ * System.out.println(success.bimap(Function1.identity(), AtomicInteger::get));
+ *
+ * Either<Exception, ?> failure = Either.left(new Exception("error"));
+ *
+ * //prints "Left(error)"
+ * System.out.println(failure.bimap(Exception::getMessage, Function1.identity()));
+ * }</pre>
+ *
* @param leftMapper maps the left value if this is a Left
* @param rightMapper maps the right value if this is a Right
* @param <X> The new left type of the resulting Either | 1 | /* ____ ______________ ________________________ __________
* \ \/ / \ \/ / __/ / \ \/ / \
* \______/___/\___\______/___/_____/___/\___\______/___/\___\
*
* Copyright 2019 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.control;
import io.vavr.collection.Iterator;
import io.vavr.collection.Seq;
import io.vavr.collection.Vector;
import java.io.Serializable;
import java.lang.Iterable;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
* Either represents a value of two possible types. An Either is either a {@link Left} or a
* {@link Right}.
* <p>
* If the given Either is a Right and projected to a Left, the Left operations have no effect on the Right value.<br>
* If the given Either is a Left and projected to a Right, the Right operations have no effect on the Left value.<br>
* If a Left is projected to a Left or a Right is projected to a Right, the operations have an effect.
* <p>
* <strong>Example:</strong> A compute() function, which results either in an Integer value (in the case of success) or
* in an error message of type String (in the case of failure). By convention the success case is Right and the failure
* is Left.
*
* <pre>
* <code>
* Either<String,Integer> value = compute().right().map(i -> i * 2).toEither();
* </code>
* </pre>
*
* If the result of compute() is Right(1), the value is Right(2).<br>
* If the result of compute() is Left("error"), the value is Left("error").
*
* @param <L> The type of the Left value of an Either.
* @param <R> The type of the Right value of an Either.
*/
@SuppressWarnings("deprecation")
public abstract class Either<L, R> implements io.vavr.Iterable<R>, io.vavr.Value<R>, Serializable {
private static final long serialVersionUID = 1L;
// sealed
private Either() {
}
/**
* Constructs a {@link Right}
*
* <pre>{@code
// Creates an Either instance initialized with the right value 1
* Either<?, Integer> either = Either.right(1);
* }</pre>
*
* @param right The value.
* @param <L> Type of left value.
* @param <R> Type of right value.
* @return A new {@code Right} instance.
*/
public static <L, R> Either<L, R> right(R right) {
return new Right<>(right);
}
/**
* Constructs a {@link Left}
*
* <pre>{@code
* // Creates Either instance initiated with left value "error message"
* Either<String, ?> either = Either.left("error message");
* }</pre>
*
* @param left The value.
* @param <L> Type of left value.
* @param <R> Type of right value.
* @return A new {@code Left} instance.
*/
public static <L, R> Either<L, R> left(L left) {
return new Left<>(left);
}
/**
* Narrows a widened {@code Either<? extends L, ? extends R>} to {@code Either<L, R>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
* <pre>{@code
*
* // It's ok, Integer inherits from Number
* Either<?, Number> answer = Either.right(42);
*
* // RuntimeException is an Exception
* Either<Exception, ?> failed = Either.left(new RuntimeException("Vogon poetry recital"));
*
* }</pre>
*
* @param either A {@code Either}.
* @param <L> Type of left value.
* @param <R> Type of right value.
* @return the given {@code either} instance as narrowed type {@code Either<L, R>}.
*/
@SuppressWarnings("unchecked")
public static <L, R> Either<L, R> narrow(Either<? extends L, ? extends R> either) {
return (Either<L, R>) either;
}
/**
* Returns the left value.
*
* <pre>{@code
* //prints "error"
* System.out.println(Either.left("error").getLeft());
*
* //throws NoSuchElementException
* System.out.println(Either.right(42).getLeft());
* }</pre>
*
* @return The left value.
* @throws NoSuchElementException if this is a {@code Right}.
*/
public abstract L getLeft();
/**
* Returns whether this Either is a Left.
*
* <pre>{@code
* //prints "true"
* System.out.println(Either.left("error").isLeft());
*
* //prints "false"
* System.out.println(Either.right(42).isLeft());
* }</pre>
*
* @return true, if this is a Left, false otherwise
*/
public abstract boolean isLeft();
/**
* Returns whether this Either is a Right.
*
* <pre>{@code
* //prints "true"
* System.out.println(Either.right(42).isRight());
*
* //prints "false"
* System.out.println(Either.left("error").isRight());
* }</pre>
*
* @return true, if this is a Right, false otherwise
*/
public abstract boolean isRight();
/**
* Returns a LeftProjection of this Either.
*
* @return a new LeftProjection of this
* @deprecated Either is right-biased. Use {@link #swap()} instead of projections.
*/
@Deprecated
public final LeftProjection<L, R> left() {
return new LeftProjection<>(this);
}
/**
* Returns a RightProjection of this Either.
*
* @return a new RightProjection of this
* @deprecated Either is right-biased. Use {@link #swap()} instead of projections.
*/
@Deprecated
public final RightProjection<L, R> right() {
return new RightProjection<>(this);
}
/**
* Maps either the left or the right side of this disjunction.
*
* @param leftMapper maps the left value if this is a Left
* @param rightMapper maps the right value if this is a Right
* @param <X> The new left type of the resulting Either
* @param <Y> The new right type of the resulting Either
* @return A new Either instance
*/
public final <X, Y> Either<X, Y> bimap(Function<? super L, ? extends X> leftMapper, Function<? super R, ? extends Y> rightMapper) {
Objects.requireNonNull(leftMapper, "leftMapper is null");
Objects.requireNonNull(rightMapper, "rightMapper is null");
if (isRight()) {
return new Right<>(rightMapper.apply(get()));
} else {
return new Left<>(leftMapper.apply(getLeft()));
}
}
/**
* Folds either the left or the right side of this disjunction.
*
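* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = "ok: 42"
* Either.right(42).fold(l -> "err: " + l, r -> "ok: " + r);
*
* // = "err: failure"
* Either.left("failure").fold(l -> "err: " + l, r -> "ok: " + r);
* }</pre>
*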
* @param leftMapper maps the left value if this is a Left
* @param rightMapper maps the right value if this is a Right
* @param <U> type of the folded value
* @return A value of type U
*/
public final <U> U fold(Function<? super L, ? extends U> leftMapper, Function<? super R, ? extends U> rightMapper) {
Objects.requireNonNull(leftMapper, "leftMapper is null");
Objects.requireNonNull(rightMapper, "rightMapper is null");
if (isRight()) {
return rightMapper.apply(get());
} else {
return leftMapper.apply(getLeft());
}
}
/**
* Reduces many {@code Either}s into a single {@code Either} by transforming an
* {@code Iterable<Either<L, R>>} into a {@code Either<Seq<L>, Seq<R>>}.
* <p>
* If any of the given {@code Either}s is a {@link Either.Left} then {@code sequence} returns a
* {@link Either.Left} containing a non-empty {@link Seq} of all left values.
* <p>
* If none of the given {@code Either}s is a {@link Either.Left} then {@code sequence} returns a
* {@link Either.Right} containing a (possibly empty) {@link Seq} of all right values.
*
* <pre>{@code
* // = Right(Seq())
* Either.sequence(List.empty())
*
* // = Right(Seq(1, 2))
* Either.sequence(List.of(Either.right(1), Either.right(2)))
*
* // = Left(Seq("x"))
* Either.sequence(List.of(Either.right(1), Either.left("x")))
* }</pre>
*
* @param eithers An {@link Iterable} of {@code Either}s
* @param <L> closure of all left types of the given {@code Either}s
* @param <R> closure of all right types of the given {@code Either}s
* @return An {@code Either} of a {@link Seq} of left or right values
* @throws NullPointerException if {@code eithers} is null
*/
@SuppressWarnings("unchecked")
public static <L,R> Either<Seq<L>, Seq<R>> sequence(Iterable<? extends Either<? extends L, ? extends R>> eithers) {
Objects.requireNonNull(eithers, "eithers is null");
return Iterator.ofAll((Iterable<Either<L, R>>) eithers)
.partition(Either::isLeft)
.apply((leftPartition, rightPartition) -> leftPartition.hasNext()
? Either.left(leftPartition.map(Either::getLeft).toVector())
: Either.right(rightPartition.map(Either::get).toVector())
);
}
/**
* Maps the values of an iterable to a sequence of mapped values into a single {@code Either} by
* transforming an {@code Iterable<? extends T>} into an {@code Either<Seq<L>, Seq<R>>}.
* <p>
*
* @param values An {@code Iterable} of values.
* @param mapper A mapper of values to Eithers
* @param <L> The mapped left value type.
* @param <R> The mapped right value type.
* @param <T> The type of the given values.
* @return A {@code Either} of a {@link Seq} of results.
* @throws NullPointerException if values or f is null.
*/
public static <L, R, T> Either<Seq<L>, Seq<R>> traverse(Iterable<? extends T> values, Function<? super T, ? extends Either<? extends L, ? extends R>> mapper) {
Objects.requireNonNull(values, "values is null");
Objects.requireNonNull(mapper, "mapper is null");
return sequence(Iterator.ofAll(values).map(mapper));
}
/**
* Reduces many {@code Either}s into a single {@code Either} by transforming an
* {@code Iterable<Either<L, R>>} into a {@code Either<L, Seq<R>>}.
* <p>
* If any of the given {@code Either}s is a {@link Either.Left} then {@code sequenceRight} returns a
* {@link Either.Left} containing the first left value (in iteration order).
* <p>
* If none of the given {@code Either}s is a {@link Either.Left} then {@code sequenceRight} returns a
* {@link Either.Right} containing a (possibly empty) {@link Seq} of all right values.
*
* <pre>{@code
* // = Right(Seq())
* Either.sequenceRight(List.empty())
*
* // = Right(Seq(1, 2))
* Either.sequenceRight(List.of(Either.right(1), Either.right(2)))
*
* // = Left("x1")
* Either.sequenceRight(List.of(Either.right(1), Either.left("x1"), Either.left("x2")))
* }</pre>
*
* @param eithers An {@link Iterable} of {@code Either}s
* @param <L> closure of all left types of the given {@code Either}s
* @param <R> closure of all right types of the given {@code Either}s
* @return An {@code Either} of either a {@link Seq} of right values or the first left value, if present.
* @throws NullPointerException if {@code eithers} is null
*/
public static <L,R> Either<L, Seq<R>> sequenceRight(Iterable<? extends Either<? extends L, ? extends R>> eithers) {
Objects.requireNonNull(eithers, "eithers is null");
Vector<R> rightValues = Vector.empty();
for (Either<? extends L, ? extends R> either : eithers) {
if (either.isRight()) {
rightValues = rightValues.append(either.get());
} else {
return Either.left(either.getLeft());
}
}
return Either.right(rightValues);
}
/**
* Maps the values of an iterable to a sequence of mapped values into a single {@code Either} by
* transforming an {@code Iterable<? extends T>} into a {@code Either<Seq<U>>}.
* <p>
*
* @param values An {@code Iterable} of values.
* @param mapper A mapper of values to Eithers
* @param <L> The mapped left value type.
* @param <R> The mapped right value type.
* @param <T> The type of the given values.
* @return A {@code Either} of a {@link Seq} of results.
* @throws NullPointerException if values or f is null.
*/
public static <L, R, T> Either<L, Seq<R>> traverseRight(Iterable<? extends T> values, Function<? super T, ? extends Either<? extends L, ? extends R>> mapper) {
Objects.requireNonNull(values, "values is null");
Objects.requireNonNull(mapper, "mapper is null");
return sequenceRight(Iterator.ofAll(values).map(mapper));
}
/**
* Gets the Right value, or an alternate value if this Either is a Left.
*
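* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = 42
* Either.<String, Integer>right(42).getOrElseGet(l -> -1);
*
* // = -1
* Either.<String, Integer>left("error").getOrElseGet(l -> -1);
* }</pre>
*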
* @param other a function which converts a Left value to an alternative Right value
* @return the right value, if the underlying Either is a Right or else the alternative Right value provided by
* {@code other} by applying the Left value.
*/
public final R getOrElseGet(Function<? super L, ? extends R> other) {
Objects.requireNonNull(other, "other is null");
if (isRight()) {
return get();
} else {
return other.apply(getLeft());
}
}
/**
* Runs an action in the case this Either is a Left.
*
* @param action an action which consumes a Left value
*/
public final void orElseRun(Consumer<? super L> action) {
Objects.requireNonNull(action, "action is null");
if (isLeft()) {
action.accept(getLeft());
}
}
/**
* Gets the Right value or throws, if this Either is a Left.
*
* @param <X> a throwable type
* @param exceptionFunction a function which creates an exception based on a Left value
* @return the right value, if the underlying Either is a Right or else throws the exception provided by
* {@code exceptionFunction} by applying the Left value.
* @throws X if this Either is a Left
*/
public final <X extends Throwable> R getOrElseThrow(Function<? super L, X> exceptionFunction) throws X {
Objects.requireNonNull(exceptionFunction, "exceptionFunction is null");
if (isRight()) {
return get();
} else {
throw exceptionFunction.apply(getLeft());
}
}
/**
* Converts a {@code Left} to a {@code Right} and vice versa, by wrapping the value in a new type.
*
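* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = Left(42)
* Either.right(42).swap();
*
* // = Right("error")
* Either.left("error").swap();
* }</pre>
*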
* @return a new {@code Either}
*/
public final Either<R, L> swap() {
if (isRight()) {
return new Left<>(get());
} else {
return new Right<>(getLeft());
}
}
// -- Adjusted return types of Monad methods
/**
* FlatMaps this right-biased Either.
*
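* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = Right(2)
* Either.<String, Integer>right(1).flatMap(i -> Either.right(i + 1));
*
* // = Left("error")
* Either.<String, Integer>left("error").flatMap(i -> Either.right(i + 1));
* }</pre>
*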
* @param mapper A mapper
* @param <U> Component type of the mapped right value
* @return this as {@code Either<L, U>} if this is a Left, otherwise the right mapping result
* @throws NullPointerException if {@code mapper} is null
*/
@SuppressWarnings("unchecked")
public final <U> Either<L, U> flatMap(Function<? super R, ? extends Either<L, ? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isRight()) {
return (Either<L, U>) mapper.apply(get());
} else {
return (Either<L, U>) this;
}
}
/**
* Maps the value of this Either if it is a Right, performs no operation if this is a Left.
*
* <pre><code>
* import static io.vavr.API.*;
*
* // = Right("A")
* Right("a").map(String::toUpperCase);
*
* // = Left(1)
* Left(1).map(String::toUpperCase);
* </code></pre>
*
* @param mapper A mapper
* @param <U> Component type of the mapped right value
* @return a mapped {@code Monad}
* @throws NullPointerException if {@code mapper} is null
*/
@SuppressWarnings("unchecked")
@Override
public final <U> Either<L, U> map(Function<? super R, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isRight()) {
return Either.right(mapper.apply(get()));
} else {
return (Either<L, U>) this;
}
}
/**
* Maps the value of this Either if it is a Left, performs no operation if this is a Right.
*
* <pre>{@code
* import static io.vavr.API.*;
*
* // = Left(2)
* Left(1).mapLeft(i -> i + 1);
*
* // = Right("a")
* Right("a").mapLeft(i -> i + 1);
* }</pre>
*
* @param leftMapper A mapper
* @param <U> Component type of the mapped right value
* @return a mapped {@code Monad}
* @throws NullPointerException if {@code mapper} is null
*/
@SuppressWarnings("unchecked")
public final <U> Either<U, R> mapLeft(Function<? super L, ? extends U> leftMapper) {
Objects.requireNonNull(leftMapper, "leftMapper is null");
if (isLeft()) {
return Either.left(leftMapper.apply(getLeft()));
} else {
return (Either<U, R>) this;
}
}
// -- Adjusted return types of Value methods
/**
* Filters this right-biased {@code Either} by testing a predicate.
* <p>
*
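* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = Some(Right(42))
* Either.right(42).filter(i -> i > 0);
*
* // = None
* Either.right(42).filter(i -> i < 0);
*
* // a Left always passes the filter:
* // = Some(Left("error"))
* Either.left("error").filter(i -> false);
* }</pre>
*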
* @param predicate A predicate
* @return a new {@code Option} instance
* @throws NullPointerException if {@code predicate} is null
*/
public final Option<Either<L, R>> filter(Predicate<? super R> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return isLeft() || predicate.test(get()) ? Option.some(this) : Option.none();
}
/**
* Filters this right-biased {@code Either} by testing a predicate.
*
* @param predicate A predicate
* @return a new {@code Option} instance
* @throws NullPointerException if {@code predicate} is null
*/
public final Option<Either<L, R>> filterNot(Predicate<? super R> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return filter(predicate.negate());
}
/**
* Filters this right-biased {@code Either} by testing a predicate.
* If the {@code Either} is a {@code Right} and the predicate doesn't match, the
* {@code Either} will be turned into a {@code Left} with contents computed by applying
* the filterVal function to the {@code Either} value.
*
* <pre>{@code
* import static io.vavr.API.*;
*
* // = Left("bad: a")
* Right("a").filterOrElse(i -> false, val -> "bad: " + val);
*
* // = Right("a")
* Right("a").filterOrElse(i -> true, val -> "bad: " + val);
* }</pre>
*
* @param predicate A predicate
* @param zero A function that turns a right value into a left value if the right value does not make it through the filter.
* @return an {@code Either} instance
* @throws NullPointerException if {@code predicate} is null
*/
public final Either<L,R> filterOrElse(Predicate<? super R> predicate, Function<? super R, ? extends L> zero) {
Objects.requireNonNull(predicate, "predicate is null");
Objects.requireNonNull(zero, "zero is null");
if (isLeft() || predicate.test(get())) {
return this;
} else {
return Either.left(zero.apply(get()));
}
}
/**
* Gets the right value if this is a {@code Right} or throws if this is a {@code Left}.
*
* @return the right value
* @throws NoSuchElementException if this is a {@code Left}.
*/
@Override
public abstract R get();
@Override
public final boolean isEmpty() {
return isLeft();
}
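/**
* Returns this {@code Either} if it is a {@code Right}, otherwise returns the given alternative.
*
* @param other an alternative {@code Either}
* @return this {@code Either} if it is a {@code Right}, otherwise {@code other}
*/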
@SuppressWarnings("unchecked")
public final Either<L, R> orElse(Either<? extends L, ? extends R> other) {
Objects.requireNonNull(other, "other is null");
return isRight() ? this : (Either<L, R>) other;
}
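/**
* Returns this {@code Either} if it is a {@code Right}, otherwise returns the {@code Either}
* produced by the given supplier. The supplier is only evaluated if this is a {@code Left}.
*
* @param supplier a supplier of an alternative {@code Either}
* @return this {@code Either} if it is a {@code Right}, otherwise the supplied {@code Either}
*/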
@SuppressWarnings("unchecked")
public final Either<L, R> orElse(Supplier<? extends Either<? extends L, ? extends R>> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return isRight() ? this : (Either<L, R>) supplier.get();
}
/**
* A right-biased {@code Either}'s value is computed synchronously.
*
* @return false
*/
@Override
public final boolean isAsync() {
return false;
}
/**
* A right-biased {@code Either}'s value is computed eagerly.
*
* @return false
*/
@Override
public final boolean isLazy() {
return false;
}
/**
* A right-biased {@code Either} is single-valued.
*
* @return {@code true}
*/
@Override
public final boolean isSingleValued() {
return true;
}
@Override
public final Iterator<R> iterator() {
if (isRight()) {
return Iterator.of(get());
} else {
return Iterator.empty();
}
}
@Override
public final Either<L, R> peek(Consumer<? super R> action) {
Objects.requireNonNull(action, "action is null");
if (isRight()) {
action.accept(get());
}
return this;
}
public final Either<L, R> peekLeft(Consumer<? super L> action) {
Objects.requireNonNull(action, "action is null");
if (isLeft()) {
action.accept(getLeft());
}
return this;
}
/**
* Returns this as {@code Validation}.
*
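* <pre>{@code
* // illustrative sketch, not from the original Javadoc:
* // = Valid(42)
* Either.right(42).toValidation();
*
* // = Invalid("error")
* Either.left("error").toValidation();
* }</pre>
*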
* @return {@code Validation.valid(get())} if this is right, otherwise {@code Validation.invalid(getLeft())}.
*/
public final Validation<L, R> toValidation() {
return isRight() ? Validation.valid(get()) : Validation.invalid(getLeft());
}
// -- Left/Right projections
/**
* A left projection of an Either.
*
* @param <L> The type of the Left value of an Either.
* @param <R> The type of the Right value of an Either.
* @deprecated Either is right-biased. Use {@link #swap()} instead of projections.
*/
@Deprecated
public static final class LeftProjection<L, R> implements io.vavr.Value<L> {
private final Either<L, R> either;
private LeftProjection(Either<L, R> either) {
this.either = either;
}
public <L2, R2> LeftProjection<L2, R2> bimap(Function<? super L, ? extends L2> leftMapper, Function<? super R, ? extends R2> rightMapper) {
return either.<L2, R2> bimap(leftMapper, rightMapper).left();
}
/**
* A {@code LeftProjection}'s value is computed synchronously.
*
* @return false
*/
@Override
public boolean isAsync() {
return false;
}
@Override
public boolean isEmpty() {
return either.isRight();
}
/**
* A {@code LeftProjection}'s value is computed eagerly.
*
* @return false
*/
@Override
public boolean isLazy() {
return false;
}
/**
* A {@code LeftProjection} is single-valued.
*
* @return {@code true}
*/
@Override
public boolean isSingleValued() {
return true;
}
/**
* Gets the {@code Left} value or throws.
*
* @return the left value, if the underlying {@code Either} is a {@code Left}
* @throws NoSuchElementException if the underlying {@code Either} of this {@code LeftProjection} is a {@code Right}
*/
@Override
public L get() {
if (either.isLeft()) {
return either.getLeft();
} else {
throw new NoSuchElementException("LeftProjection.get() on Right");
}
}
@SuppressWarnings("unchecked")
public LeftProjection<L, R> orElse(LeftProjection<? extends L, ? extends R> other) {
Objects.requireNonNull(other, "other is null");
return either.isLeft() ? this : (LeftProjection<L, R>) other;
}
@SuppressWarnings("unchecked")
public LeftProjection<L, R> orElse(Supplier<? extends LeftProjection<? extends L, ? extends R>> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return either.isLeft() ? this : (LeftProjection<L, R>) supplier.get();
}
/**
* Gets the Left value or an alternate value, if the projected Either is a Right.
*
* @param other an alternative value
* @return the left value, if the underlying Either is a Left or else {@code other}
*/
@Override
public L getOrElse(L other) {
return either.isLeft() ? either.getLeft() : other;
}
/**
* Gets the Left value or an alternate value, if the projected Either is a Right.
*
* @param other a function which converts a Right value to an alternative Left value
* @return the left value, if the underlying Either is a Left or else the alternative Left value provided by
* {@code other} by applying the Right value.
*/
public L getOrElseGet(Function<? super R, ? extends L> other) {
Objects.requireNonNull(other, "other is null");
if (either.isLeft()) {
return either.getLeft();
} else {
return other.apply(either.get());
}
}
/**
* Runs an action in the case this is a projection on a Right value.
*
* @param action an action which consumes a Right value
*/
public void orElseRun(Consumer<? super R> action) {
Objects.requireNonNull(action, "action is null");
if (either.isRight()) {
action.accept(either.get());
}
}
/**
* Gets the Left value or throws, if the projected Either is a Right.
*
* @param <X> a throwable type
* @param exceptionFunction a function which creates an exception based on a Right value
* @return the left value, if the underlying Either is a Left or else throws the exception provided by
* {@code exceptionFunction} by applying the Right value.
* @throws X if the projected Either is a Right
*/
public <X extends Throwable> L getOrElseThrow(Function<? super R, X> exceptionFunction) throws X {
Objects.requireNonNull(exceptionFunction, "exceptionFunction is null");
if (either.isLeft()) {
return either.getLeft();
} else {
throw exceptionFunction.apply(either.get());
}
}
/**
* Returns the underlying either of this projection.
*
* @return the underlying either
*/
public Either<L, R> toEither() {
return either;
}
/**
         * Returns {@code Some} of this projection if the underlying {@code Either} is a {@code Right}, or if it is a
         * {@code Left} whose value matches the predicate; otherwise returns {@code None}.
*
* @param predicate A predicate
* @return A new Option
*/
public Option<LeftProjection<L, R>> filter(Predicate<? super L> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return either.isRight() || predicate.test(either.getLeft()) ? Option.some(this) : Option.none();
}
/**
* FlatMaps this LeftProjection.
*
* @param mapper A mapper
* @param <U> Component type of the mapped left value
         * @return this as {@code LeftProjection<U, R>} if a Right is underlying, otherwise the mapping result of the left value.
* @throws NullPointerException if {@code mapper} is null
*/
@SuppressWarnings("unchecked")
public <U> LeftProjection<U, R> flatMap(Function<? super L, ? extends LeftProjection<? extends U, R>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (either.isLeft()) {
return (LeftProjection<U, R>) mapper.apply(either.getLeft());
} else {
return (LeftProjection<U, R>) this;
}
}
/**
* Maps the left value if the projected Either is a Left.
*
* @param mapper A mapper which takes a left value and returns a value of type U
* @param <U> The new type of a Left value
* @return A new LeftProjection
*/
@SuppressWarnings("unchecked")
@Override
public <U> LeftProjection<U, R> map(Function<? super L, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (either.isLeft()) {
return either.mapLeft((Function<L, U>) mapper).left();
} else {
return (LeftProjection<U, R>) this;
}
}
/**
* Applies the given action to the value if the projected either is a Left. Otherwise nothing happens.
*
* @param action An action which takes a left value
* @return this LeftProjection
*/
@Override
public LeftProjection<L, R> peek(Consumer<? super L> action) {
Objects.requireNonNull(action, "action is null");
if (either.isLeft()) {
action.accept(either.getLeft());
}
return this;
}
/**
* Transforms this {@code LeftProjection}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super LeftProjection<L, R>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
public Iterator<L> iterator() {
if (either.isLeft()) {
return Iterator.of(either.getLeft());
} else {
return Iterator.empty();
}
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof LeftProjection && Objects.equals(either, ((LeftProjection<?, ?>) obj).either));
}
@Override
public int hashCode() {
return either.hashCode();
}
@Override
public String stringPrefix() {
return "LeftProjection";
}
@Override
public String toString() {
return stringPrefix() + "(" + either + ")";
}
}
/**
* A right projection of an Either.
*
* @param <L> The type of the Left value of an Either.
* @param <R> The type of the Right value of an Either.
* @deprecated Either is right-biased. Use {@link #swap()} instead of projections.
*/
@Deprecated
public static final class RightProjection<L, R> implements io.vavr.Value<R> {
private final Either<L, R> either;
private RightProjection(Either<L, R> either) {
this.either = either;
}
public <L2, R2> RightProjection<L2, R2> bimap(Function<? super L, ? extends L2> leftMapper, Function<? super R, ? extends R2> rightMapper) {
return either.<L2, R2> bimap(leftMapper, rightMapper).right();
}
/**
* A {@code RightProjection}'s value is computed synchronously.
*
* @return false
*/
@Override
public boolean isAsync() {
return false;
}
@Override
public boolean isEmpty() {
return either.isLeft();
}
/**
* A {@code RightProjection}'s value is computed eagerly.
*
* @return false
*/
@Override
public boolean isLazy() {
return false;
}
/**
* A {@code RightProjection} is single-valued.
*
* @return {@code true}
*/
@Override
public boolean isSingleValued() {
return true;
}
/**
* Gets the {@code Right} value or throws.
*
* @return the right value, if the underlying {@code Either} is a {@code Right}
* @throws NoSuchElementException if the underlying {@code Either} of this {@code RightProjection} is a {@code Left}
*/
@Override
public R get() {
if (either.isRight()) {
return either.get();
} else {
throw new NoSuchElementException("RightProjection.get() on Left");
}
}
@SuppressWarnings("unchecked")
public RightProjection<L, R> orElse(RightProjection<? extends L, ? extends R> other) {
Objects.requireNonNull(other, "other is null");
return either.isRight() ? this : (RightProjection<L, R>) other;
}
@SuppressWarnings("unchecked")
public RightProjection<L, R> orElse(Supplier<? extends RightProjection<? extends L, ? extends R>> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return either.isRight() ? this : (RightProjection<L, R>) supplier.get();
}
/**
* Gets the Right value or an alternate value, if the projected Either is a Left.
*
* @param other an alternative value
* @return the right value, if the underlying Either is a Right or else {@code other}
*/
@Override
public R getOrElse(R other) {
return either.getOrElse(other);
}
/**
* Gets the Right value or an alternate value, if the projected Either is a Left.
*
* @param other a function which converts a Left value to an alternative Right value
* @return the right value, if the underlying Either is a Right or else the alternative Right value provided by
* {@code other} by applying the Left value.
*/
public R getOrElseGet(Function<? super L, ? extends R> other) {
Objects.requireNonNull(other, "other is null");
return either.getOrElseGet(other);
}
/**
* Runs an action in the case this is a projection on a Left value.
*
* @param action an action which consumes a Left value
*/
public void orElseRun(Consumer<? super L> action) {
Objects.requireNonNull(action, "action is null");
either.orElseRun(action);
}
/**
* Gets the Right value or throws, if the projected Either is a Left.
*
* @param <X> a throwable type
* @param exceptionFunction a function which creates an exception based on a Left value
* @return the right value, if the underlying Either is a Right or else throws the exception provided by
* {@code exceptionFunction} by applying the Left value.
* @throws X if the projected Either is a Left
*/
public <X extends Throwable> R getOrElseThrow(Function<? super L, X> exceptionFunction) throws X {
Objects.requireNonNull(exceptionFunction, "exceptionFunction is null");
return either.getOrElseThrow(exceptionFunction);
}
/**
* Returns the underlying either of this projection.
*
* @return the underlying either
*/
public Either<L, R> toEither() {
return either;
}
/**
         * Returns {@code Some} of this projection if the underlying {@code Either} is a {@code Left}, or if it is a
         * {@code Right} whose value matches the predicate; otherwise returns {@code None}.
*
* @param predicate A predicate
* @return A new Option
*/
public Option<RightProjection<L, R>> filter(Predicate<? super R> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return either.isLeft() || predicate.test(either.get()) ? Option.some(this) : Option.none();
}
/**
* FlatMaps this RightProjection.
*
* @param mapper A mapper
* @param <U> Component type of the mapped right value
         * @return this as {@code RightProjection<L, U>} if a Left is underlying, otherwise the mapping result of the right value.
* @throws NullPointerException if {@code mapper} is null
*/
@SuppressWarnings("unchecked")
public <U> RightProjection<L, U> flatMap(Function<? super R, ? extends RightProjection<L, ? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (either.isRight()) {
return (RightProjection<L, U>) mapper.apply(either.get());
} else {
return (RightProjection<L, U>) this;
}
}
/**
* Maps the right value if the projected Either is a Right.
*
* @param mapper A mapper which takes a right value and returns a value of type U
* @param <U> The new type of a Right value
* @return A new RightProjection
*/
@SuppressWarnings("unchecked")
@Override
public <U> RightProjection<L, U> map(Function<? super R, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (either.isRight()) {
return either.map((Function<R, U>) mapper).right();
} else {
return (RightProjection<L, U>) this;
}
}
/**
* Applies the given action to the value if the projected either is a Right. Otherwise nothing happens.
*
* @param action An action which takes a right value
         * @return this {@code RightProjection} instance
*/
@Override
public RightProjection<L, R> peek(Consumer<? super R> action) {
Objects.requireNonNull(action, "action is null");
if (either.isRight()) {
action.accept(either.get());
}
return this;
}
/**
* Transforms this {@code RightProjection}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super RightProjection<L, R>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
public Iterator<R> iterator() {
return either.iterator();
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof RightProjection && Objects.equals(either, ((RightProjection<?, ?>) obj).either));
}
@Override
public int hashCode() {
return either.hashCode();
}
@Override
public String stringPrefix() {
return "RightProjection";
}
@Override
public String toString() {
return stringPrefix() + "(" + either + ")";
}
}
/**
* The {@code Left} version of an {@code Either}.
*
* @param <L> left component type
* @param <R> right component type
* @deprecated will be removed from the public API
*/
@Deprecated
public static final class Left<L, R> extends Either<L, R> implements Serializable {
private static final long serialVersionUID = 1L;
private final L value;
/**
* Constructs a {@code Left}.
*
* @param value a left value
*/
private Left(L value) {
this.value = value;
}
@Override
public R get() {
throw new NoSuchElementException("get() on Left");
}
@Override
public L getLeft() {
return value;
}
@Override
public boolean isLeft() {
return true;
}
@Override
public boolean isRight() {
return false;
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof Left && Objects.equals(value, ((Left<?, ?>) obj).value));
}
@Override
public int hashCode() {
return Objects.hashCode(value);
}
@Override
public String stringPrefix() {
return "Left";
}
@Override
public String toString() {
return stringPrefix() + "(" + value + ")";
}
}
/**
* The {@code Right} version of an {@code Either}.
*
* @param <L> left component type
* @param <R> right component type
* @deprecated will be removed from the public API
*/
@Deprecated
public static final class Right<L, R> extends Either<L, R> implements Serializable {
private static final long serialVersionUID = 1L;
private final R value;
/**
* Constructs a {@code Right}.
*
* @param value a right value
*/
private Right(R value) {
this.value = value;
}
@Override
public R get() {
return value;
}
@Override
public L getLeft() {
throw new NoSuchElementException("getLeft() on Right");
}
@Override
public boolean isLeft() {
return false;
}
@Override
public boolean isRight() {
return true;
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof Right && Objects.equals(value, ((Right<?, ?>) obj).value));
}
@Override
public int hashCode() {
return Objects.hashCode(value);
}
@Override
public String stringPrefix() {
return "Right";
}
@Override
public String toString() {
return stringPrefix() + "(" + value + ")";
}
}
}
| 1 | 13,198 | one space missing at the beginning | vavr-io-vavr | java |
@@ -43,8 +43,9 @@ type RequestDrivenWebServiceProps struct {
// AppRunnerInstanceConfig contains the instance configuration properties for an App Runner service.
type AppRunnerInstanceConfig struct {
- CPU *int `yaml:"cpu"`
- Memory *int `yaml:"memory"`
+ CPU *int `yaml:"cpu"`
+ Memory *int `yaml:"memory"`
+ Platform string `yaml:"platform,omitempty"`
}
// NewRequestDrivenWebService creates a new Request-Driven Web Service manifest with default values. | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
requestDrivenWebSvcManifestPath string = "workloads/services/rd-web/manifest.yml"
)
// RequestDrivenWebService holds the configuration to create a Request-Driven Web Service.
type RequestDrivenWebService struct {
Workload `yaml:",inline"`
RequestDrivenWebServiceConfig `yaml:",inline"`
Environments map[string]*RequestDrivenWebServiceConfig `yaml:",flow"` // Fields to override per environment.
parser template.Parser
}
// RequestDrivenWebServiceConfig holds the configuration that can be overridden per environment.
type RequestDrivenWebServiceConfig struct {
RequestDrivenWebServiceHttpConfig `yaml:"http,flow"`
InstanceConfig AppRunnerInstanceConfig `yaml:",inline"`
ImageConfig ImageWithPort `yaml:"image"`
Variables map[string]string `yaml:"variables"`
Tags map[string]string `yaml:"tags"`
}
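// RequestDrivenWebServiceHttpConfig holds the HTTP configuration for a Request-Driven Web Service.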
type RequestDrivenWebServiceHttpConfig struct {
HealthCheckConfiguration HealthCheckArgsOrString `yaml:"healthcheck"`
}
// RequestDrivenWebServiceProps contains properties for creating a new request-driven web service manifest.
type RequestDrivenWebServiceProps struct {
*WorkloadProps
Port uint16
}
// AppRunnerInstanceConfig contains the instance configuration properties for an App Runner service.
type AppRunnerInstanceConfig struct {
CPU *int `yaml:"cpu"`
Memory *int `yaml:"memory"`
}
// NewRequestDrivenWebService creates a new Request-Driven Web Service manifest with default values.
func NewRequestDrivenWebService(props *RequestDrivenWebServiceProps) *RequestDrivenWebService {
svc := newDefaultRequestDrivenWebService()
svc.Name = aws.String(props.Name)
svc.RequestDrivenWebServiceConfig.ImageConfig.Image.Location = stringP(props.Image)
svc.RequestDrivenWebServiceConfig.ImageConfig.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
svc.RequestDrivenWebServiceConfig.ImageConfig.Port = aws.Uint16(props.Port)
svc.parser = template.New()
return svc
}
// newDefaultRequestDrivenWebService returns an empty RequestDrivenWebService with only the default values set.
func newDefaultRequestDrivenWebService() *RequestDrivenWebService {
return &RequestDrivenWebService{
Workload: Workload{
Type: aws.String(RequestDrivenWebServiceType),
},
RequestDrivenWebServiceConfig: RequestDrivenWebServiceConfig{
ImageConfig: ImageWithPort{},
InstanceConfig: AppRunnerInstanceConfig{
CPU: aws.Int(1024),
Memory: aws.Int(2048),
},
},
}
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *RequestDrivenWebService) MarshalBinary() ([]byte, error) {
content, err := s.parser.Parse(requestDrivenWebSvcManifestPath, *s)
if err != nil {
return nil, err
}
return content.Bytes(), nil
}
// BuildRequired returns if the service requires building from the local Dockerfile.
func (s *RequestDrivenWebService) BuildRequired() (bool, error) {
return requiresBuild(s.ImageConfig.Image)
}
// BuildArgs returns a docker.BuildArguments object given a ws root directory.
func (s *RequestDrivenWebService) BuildArgs(wsRoot string) *DockerBuildArgs {
return s.ImageConfig.BuildConfig(wsRoot)
}
// ApplyEnv returns the service manifest with environment overrides.
// If the environment passed in does not have any overrides then it returns itself.
func (s RequestDrivenWebService) ApplyEnv(envName string) (WorkloadManifest, error) {
overrideConfig, ok := s.Environments[envName]
if !ok {
return &s, nil
}
// Apply overrides to the original service configuration.
err := mergo.Merge(&s, RequestDrivenWebService{
RequestDrivenWebServiceConfig: *overrideConfig,
}, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue, mergo.WithTransformers(workloadTransformer{}))
if err != nil {
return nil, err
}
s.Environments = nil
return &s, nil
}
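// applyEnvPlatformSketch is an editor's illustrative sketch (hypothetical, not
// part of the original source) of the concern raised in the review note below:
// with mergo.WithOverwriteWithEmptyValue, a plain string field left empty in
// the override zeroes out the base value, which is why pointer fields such as
// *string are safer for optional settings.
func applyEnvPlatformSketch() string {
	type cfg struct{ Platform string }
	base := cfg{Platform: "linux/arm64"}
	override := cfg{} // env manifest does not set platform
	mergo.Merge(&base, override, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue)
	return base.Platform // "" - the original value was overwritten
}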
1 | 18,471 | I worry that having `Platform` as `string` instead of `*string` would result in it getting overridden with `""` if the environment manifest config doesn't specify `platform`. If it gets overridden as `""`, that could be a problem for users that are not using `linux/amd64`, right? If this indeed is a potential impact, could we double-check in `ApplyEnv`'s unit test? Same with the `Platform` in `TaskConfig`. Sorry for not seeing this in my previous review!!! | aws-copilot-cli | go
@@ -26,7 +26,10 @@ exports = module.exports = function(grunt, options) {
'http://' + host + ':<%= connect.test.options.port %>/test/commons/',
'http://' +
host +
- ':<%= connect.test.options.port %>/test/integration/rules/'
+ ':<%= connect.test.options.port %>/test/integration/rules/',
+ 'http://' +
+ host +
+ ':<%= connect.test.options.port %>/test/integration/api/external/'
],
run: true,
growlOnSuccess: false, | 1 | exports = module.exports = function(grunt, options) {
var host = 'localhost';
if (process.env.REMOTE_TESTSERVER_HOST) {
host = process.env.REMOTE_TESTSERVER_HOST;
}
function mapToUrl(files, port) {
return grunt.file.expand(files).map(function(file) {
return 'http://' + host + ':' + port + '/' + file;
});
}
return {
options: options,
unit: {
options: {
logErrors: true,
log: true,
urls: [
'http://' + host + ':<%= connect.test.options.port %>/test/core/',
'http://' + host + ':<%= connect.test.options.port %>/test/checks/',
'http://' +
host +
':<%= connect.test.options.port %>/test/rule-matches/',
'http://' + host + ':<%= connect.test.options.port %>/test/commons/',
'http://' +
host +
':<%= connect.test.options.port %>/test/integration/rules/'
],
run: true,
growlOnSuccess: false,
mocha: {
grep: grunt.option('grep')
}
}
},
integration: {
options: {
log: true,
urls: mapToUrl(
[
'test/integration/full/**/*.html',
'!test/integration/full/**/frames/**/*.html'
].concat([
// These tests can be flaky on AppVeyor in Chrome and frequently fail
process.env.APPVEYOR
? ['!test/integration/full/preload-cssom/preload-cssom.html']
: []
]),
'<%= connect.test.options.port %>'
),
run: true,
growlOnSuccess: false,
mocha: {
grep: grunt.option('grep')
}
}
}
};
};
| 1 | 15,000 | I like the new directory `/api`, what is the thinking behind `/external`? | dequelabs-axe-core | js |
@@ -2844,11 +2844,10 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim
// update account signing keys
a.signingKeys = nil
signersChanged := false
- if len(ac.SigningKeys) > 0 {
- // insure copy the new keys and sort
- a.signingKeys = append(a.signingKeys, ac.SigningKeys...)
- sort.Strings(a.signingKeys)
+ for k, _ := range ac.SigningKeys {
+ a.signingKeys = append(a.signingKeys, k)
}
+ sort.Strings(a.signingKeys)
if len(a.signingKeys) != len(old.signingKeys) {
signersChanged = true
} else { | 1 | // Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"math"
"math/rand"
"net/http"
"net/textproto"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
// For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an
// account will be grouped in the default global account.
const globalAccountName = DEFAULT_GLOBAL_ACCOUNT
// Accounts are subject namespace definitions. By default no messages are shared between accounts.
// You can share via Exports and Imports of Streams and Services.
type Account struct {
Name string
Nkey string
Issuer string
claimJWT string
updated time.Time
mu sync.RWMutex
sqmu sync.Mutex
sl *Sublist
ic *client
isid uint64
etmr *time.Timer
ctmr *time.Timer
strack map[string]sconns
nrclients int32
sysclients int32
nleafs int32
nrleafs int32
clients map[*client]struct{}
rm map[string]int32
lqws map[string]int32
usersRevoked map[string]int64
actsRevoked map[string]int64
mappings []*mapping
lleafs []*client
imports importMap
exports exportMap
js *jsAccount
jsLimits *JetStreamAccountLimits
limits
expired bool
incomplete bool
signingKeys []string
srv *Server // server this account is registered with (possibly nil)
lds string // loop detection subject for leaf nodes
siReply []byte // service reply prefix, will form wildcard subscription.
prand *rand.Rand
eventIds *nuid.NUID
eventIdsMu sync.Mutex
defaultPerms *Permissions
}
// Account based limits.
type limits struct {
mpay int32
msubs int32
mconns int32
mleafs int32
}
// Used to track remote clients and leafnodes per remote server.
type sconns struct {
conns int32
leafs int32
}
// Import stream mapping struct
type streamImport struct {
acc *Account
from string
to string
tr *transform
rtr *transform
claim *jwt.Import
usePub bool
invalid bool
}
// Import service mapping struct
type serviceImport struct {
acc *Account
claim *jwt.Import
se *serviceExport
sid []byte
from string
to string
tr *transform
ts int64
rt ServiceRespType
latency *serviceLatency
m1 *ServiceLatency
rc *client
usePub bool
response bool
invalid bool
share bool
tracking bool
didDeliver bool
isSysAcc bool
trackingHdr http.Header // header from request
}
// This is used to record when we create a mapping for implicit service
// imports. We use this to clean up entries that are not singletons when
// we detect that interest is no longer present. The key to the map will
// be the actual interest. We record the mapped subject and the account.
type serviceRespEntry struct {
acc *Account
msub string
}
// ServiceRespType represents the types of service request response types.
type ServiceRespType uint8
// Service response types. Defaults to a singleton.
const (
Singleton ServiceRespType = iota
Streamed
Chunked
)
// String helper.
func (rt ServiceRespType) String() string {
switch rt {
case Singleton:
return "Singleton"
case Streamed:
return "Streamed"
case Chunked:
return "Chunked"
}
return "Unknown ServiceResType"
}
// exportAuth holds configured approvals or boolean indicating an
// auth token is required for import.
type exportAuth struct {
tokenReq bool
approved map[string]*Account
}
// streamExport
type streamExport struct {
exportAuth
}
// serviceExport holds additional information for exported services.
type serviceExport struct {
exportAuth
acc *Account
respType ServiceRespType
latency *serviceLatency
rtmr *time.Timer
respThresh time.Duration
}
// Used to track service latency.
type serviceLatency struct {
sampling int8 // percentage from 1-100 or 0 to indicate triggered by header
subject string
}
// exportMap tracks the exported streams and services.
type exportMap struct {
streams map[string]*streamExport
services map[string]*serviceExport
responses map[string]*serviceImport
}
// importMap tracks the imported streams and services.
// For services we will also track the response mappings as well.
type importMap struct {
streams []*streamImport
services map[string]*serviceImport
rrMap map[string][]*serviceRespEntry
}
// NewAccount creates a new unlimited account with the given name.
func NewAccount(name string) *Account {
a := &Account{
Name: name,
limits: limits{-1, -1, -1, -1},
eventIds: nuid.New(),
}
return a
}
// Used to create shallow copies of accounts for transfer
// from opts to real accounts in server struct.
func (a *Account) shallowCopy() *Account {
na := NewAccount(a.Name)
na.Nkey = a.Nkey
na.Issuer = a.Issuer
if a.imports.streams != nil {
na.imports.streams = make([]*streamImport, 0, len(a.imports.streams))
for _, v := range a.imports.streams {
si := *v
na.imports.streams = append(na.imports.streams, &si)
}
}
if a.imports.services != nil {
na.imports.services = make(map[string]*serviceImport)
for k, v := range a.imports.services {
si := *v
na.imports.services[k] = &si
}
}
if a.exports.streams != nil {
na.exports.streams = make(map[string]*streamExport)
for k, v := range a.exports.streams {
if v != nil {
se := *v
na.exports.streams[k] = &se
} else {
na.exports.streams[k] = nil
}
}
}
if a.exports.services != nil {
na.exports.services = make(map[string]*serviceExport)
for k, v := range a.exports.services {
if v != nil {
se := *v
na.exports.services[k] = &se
} else {
na.exports.services[k] = nil
}
}
}
// JetStream
na.jsLimits = a.jsLimits
return na
}
// nextEventID uses its own lock for better concurrency.
func (a *Account) nextEventID() string {
a.eventIdsMu.Lock()
id := a.eventIds.Next()
a.eventIdsMu.Unlock()
return id
}
// Called to track a remote server and the connections and leafnodes it
// has for this account.
func (a *Account) updateRemoteServer(m *AccountNumConns) []*client {
a.mu.Lock()
if a.strack == nil {
a.strack = make(map[string]sconns)
}
// This does not depend on receiving all updates since each one is idempotent.
// FIXME(dlc) - We should cleanup when these both go to zero.
prev := a.strack[m.Server.ID]
a.strack[m.Server.ID] = sconns{conns: int32(m.Conns), leafs: int32(m.LeafNodes)}
a.nrclients += int32(m.Conns) - prev.conns
a.nrleafs += int32(m.LeafNodes) - prev.leafs
mtce := a.mconns != jwt.NoLimit && (len(a.clients)-int(a.sysclients)+int(a.nrclients) > int(a.mconns))
// If we are over here some have snuck in and we need to rebalance.
// All others will probably be doing the same thing but better to be
// conservative and a bit harsh here. Clients will reconnect if we overcompensate.
var clients []*client
if mtce {
clients = make([]*client, 0, len(a.clients))
for c := range a.clients {
clients = append(clients, c)
}
sort.Slice(clients, func(i, j int) bool {
return clients[i].start.After(clients[j].start)
})
over := (len(a.clients) - int(a.sysclients) + int(a.nrclients)) - int(a.mconns)
if over < len(clients) {
clients = clients[:over]
}
}
// Now check leafnodes.
mtlce := a.mleafs != jwt.NoLimit && (a.nleafs+a.nrleafs > a.mleafs)
if mtlce {
// Take ones from the end.
leafs := a.lleafs
over := int(a.nleafs + a.nrleafs - a.mleafs)
if over < len(leafs) {
leafs = leafs[len(leafs)-over:]
}
clients = append(clients, leafs...)
}
a.mu.Unlock()
// If we have exceeded our max clients this will be populated.
return clients
}
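// exampleRemoteAccounting is an editor's distilled sketch (not part of the
// original source) of the idempotent per-server accounting used above: each
// update replaces the previous snapshot for that server and adjusts the
// totals by the delta, so a re-delivered update never double counts.
func (a *Account) exampleRemoteAccounting(sid string, conns, leafs int32) {
	prev := a.strack[sid]
	a.strack[sid] = sconns{conns: conns, leafs: leafs}
	a.nrclients += conns - prev.conns
	a.nrleafs += leafs - prev.leafs
}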
// Removes tracking for a remote server that has shutdown.
func (a *Account) removeRemoteServer(sid string) {
a.mu.Lock()
if a.strack != nil {
prev := a.strack[sid]
delete(a.strack, sid)
a.nrclients -= prev.conns
a.nrleafs -= prev.leafs
}
a.mu.Unlock()
}
// When querying for subject interest this is the number of
// expected responses. We need to actually check that the entry
// has active connections.
func (a *Account) expectedRemoteResponses() (expected int32) {
a.mu.RLock()
for _, sc := range a.strack {
if sc.conns > 0 || sc.leafs > 0 {
expected++
}
}
a.mu.RUnlock()
return
}
// Clears eventing and tracking for this account.
func (a *Account) clearEventing() {
a.mu.Lock()
a.nrclients = 0
// Now clear state
clearTimer(&a.etmr)
clearTimer(&a.ctmr)
a.clients = nil
a.strack = nil
a.mu.Unlock()
}
// GetName will return the accounts name.
func (a *Account) GetName() string {
if a == nil {
return "n/a"
}
a.mu.RLock()
name := a.Name
a.mu.RUnlock()
return name
}
// NumConnections returns active number of clients for this account for
// all known servers.
func (a *Account) NumConnections() int {
a.mu.RLock()
nc := len(a.clients) + int(a.nrclients)
a.mu.RUnlock()
return nc
}
// NumRemoteConnections returns the number of client or leaf connections that
// are not on this server.
func (a *Account) NumRemoteConnections() int {
a.mu.RLock()
nc := int(a.nrclients + a.nrleafs)
a.mu.RUnlock()
return nc
}
// NumLocalConnections returns active number of clients for this account
// on this server.
func (a *Account) NumLocalConnections() int {
a.mu.RLock()
nlc := a.numLocalConnections()
a.mu.RUnlock()
return nlc
}
// Do not account for the system accounts.
func (a *Account) numLocalConnections() int {
return len(a.clients) - int(a.sysclients) - int(a.nleafs)
}
// This is for extended local interest.
// Lock should not be held.
func (a *Account) numLocalAndLeafConnections() int {
a.mu.RLock()
nlc := len(a.clients) - int(a.sysclients)
a.mu.RUnlock()
return nlc
}
func (a *Account) numLocalLeafNodes() int {
return int(a.nleafs)
}
// MaxTotalConnectionsReached returns if we have reached our limit for number of connections.
func (a *Account) MaxTotalConnectionsReached() bool {
var mtce bool
a.mu.RLock()
if a.mconns != jwt.NoLimit {
mtce = len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns)
}
a.mu.RUnlock()
return mtce
}
// MaxActiveConnections returns the set limit for the account system
// wide for total number of active connections.
func (a *Account) MaxActiveConnections() int {
a.mu.RLock()
mconns := int(a.mconns)
a.mu.RUnlock()
return mconns
}
// MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes.
func (a *Account) MaxTotalLeafNodesReached() bool {
a.mu.RLock()
mtc := a.maxTotalLeafNodesReached()
a.mu.RUnlock()
return mtc
}
func (a *Account) maxTotalLeafNodesReached() bool {
if a.mleafs != jwt.NoLimit {
return a.nleafs+a.nrleafs >= a.mleafs
}
return false
}
// NumLeafNodes returns the active number of local and remote
// leaf node connections.
func (a *Account) NumLeafNodes() int {
a.mu.RLock()
nln := int(a.nleafs + a.nrleafs)
a.mu.RUnlock()
return nln
}
// NumRemoteLeafNodes returns the active number of remote
// leaf node connections.
func (a *Account) NumRemoteLeafNodes() int {
a.mu.RLock()
nrn := int(a.nrleafs)
a.mu.RUnlock()
return nrn
}
// MaxActiveLeafNodes returns the set limit for the account system
// wide for total number of leafnode connections.
// NOTE: these are tracked separately.
func (a *Account) MaxActiveLeafNodes() int {
a.mu.RLock()
mleafs := int(a.mleafs)
a.mu.RUnlock()
return mleafs
}
// RoutedSubs returns how many subjects we would send across a route when first
// connected or expressing interest. These are local client subscriptions.
func (a *Account) RoutedSubs() int {
a.mu.RLock()
defer a.mu.RUnlock()
return len(a.rm)
}
// TotalSubs returns total number of Subscriptions for this account.
func (a *Account) TotalSubs() int {
a.mu.RLock()
defer a.mu.RUnlock()
return int(a.sl.Count())
}
// MapDest is for mapping published subjects for clients.
type MapDest struct {
Subject string `json:"subject"`
Weight uint8 `json:"weight"`
OptCluster string `json:"cluster,omitempty"`
}
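// NewMapDest returns a MapDest for the given subject and weight.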
func NewMapDest(subject string, weight uint8) *MapDest {
return &MapDest{subject, weight, ""}
}
// destination is the internal representation of a weighted mapped destination.
type destination struct {
tr *transform
weight uint8
}
// mapping is an internal entry for mapping subjects.
type mapping struct {
src string
wc bool
dests []*destination
cdests map[string][]*destination
}
// AddMapping adds in a simple route mapping from src subject to dest subject
// for inbound client messages.
func (a *Account) AddMapping(src, dest string) error {
return a.AddWeightedMappings(src, NewMapDest(dest, 100))
}
// AddWeightedMappings will add in weighted mappings from src to the given destinations.
// TODO(dlc) - Allow cluster filtering
func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error {
a.mu.Lock()
defer a.mu.Unlock()
// We use this for selecting between multiple weighted destinations.
if a.prand == nil {
a.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
if !IsValidSubject(src) {
return ErrBadSubject
}
m := &mapping{src: src, wc: subjectHasWildcard(src), dests: make([]*destination, 0, len(dests)+1)}
seen := make(map[string]struct{})
var tw uint8
for _, d := range dests {
if _, ok := seen[d.Subject]; ok {
return fmt.Errorf("duplicate entry for %q", d.Subject)
}
seen[d.Subject] = struct{}{}
if d.Weight > 100 {
return fmt.Errorf("individual weights need to be <= 100")
}
tw += d.Weight
if tw > 100 {
return fmt.Errorf("total weight needs to be <= 100")
}
if !IsValidSubject(d.Subject) {
return ErrBadSubject
}
tr, err := newTransform(src, d.Subject)
if err != nil {
return err
}
if d.OptCluster == "" {
m.dests = append(m.dests, &destination{tr, d.Weight})
} else {
// We have a cluster scoped filter.
if m.cdests == nil {
m.cdests = make(map[string][]*destination)
}
ad := m.cdests[d.OptCluster]
ad = append(ad, &destination{tr, d.Weight})
m.cdests[d.OptCluster] = ad
}
}
processDestinations := func(dests []*destination) ([]*destination, error) {
var ltw uint8
for _, d := range dests {
ltw += d.weight
}
		// Automatically add the original subject with the remaining weight if the entries' weights do not total 100,
		// but only if the src was not already added explicitly, meaning the caller intends some traffic loss.
_, haveSrc := seen[src]
if ltw != 100 && !haveSrc {
dest := src
if m.wc {
// We need to make the appropriate markers for the wildcards etc.
dest = transformTokenize(dest)
}
tr, err := newTransform(src, dest)
if err != nil {
return nil, err
}
aw := 100 - ltw
if len(dests) == 0 {
aw = 100
}
dests = append(dests, &destination{tr, aw})
}
sort.Slice(dests, func(i, j int) bool { return dests[i].weight < dests[j].weight })
var lw uint8
for _, d := range dests {
d.weight += lw
lw = d.weight
}
return dests, nil
}
var err error
if m.dests, err = processDestinations(m.dests); err != nil {
return err
}
// Option cluster scoped destinations
for cluster, dests := range m.cdests {
if dests, err = processDestinations(dests); err != nil {
return err
}
m.cdests[cluster] = dests
}
// Replace an old one if it exists.
	for i, em := range a.mappings {
		if em.src == src {
			a.mappings[i] = m
return nil
}
}
// If we did not replace add to the end.
a.mappings = append(a.mappings, m)
return nil
}
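// exampleWeightedMappings is an editor's illustrative sketch (not part of the
// original source): route 80% of traffic published on "orders.online" to
// "orders.us" and 20% to "orders.eu". Weights must total at most 100; any
// shortfall is automatically routed back to the original subject.
func exampleWeightedMappings() error {
	acc := NewAccount("A")
	return acc.AddWeightedMappings("orders.online",
		NewMapDest("orders.us", 80),
		NewMapDest("orders.eu", 20),
	)
}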
// Helper function to tokenize subjects with partial wildcards into formal transform destinations.
// e.g. foo.*.* -> foo.$1.$2
func transformTokenize(subject string) string {
// We need to make the appropriate markers for the wildcards etc.
i := 1
var nda []string
for _, token := range strings.Split(subject, tsep) {
if token == "*" {
nda = append(nda, fmt.Sprintf("$%d", i))
i++
} else {
nda = append(nda, token)
}
}
return strings.Join(nda, tsep)
}
func transformUntokenize(subject string) (string, []string) {
var phs []string
var nda []string
for _, token := range strings.Split(subject, tsep) {
if len(token) > 1 && token[0] == '$' && token[1] >= '1' && token[1] <= '9' {
phs = append(phs, token)
nda = append(nda, "*")
} else {
nda = append(nda, token)
}
}
return strings.Join(nda, tsep), phs
}
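// exampleTransformTokenize is an editor's sketch showing the expected round
// trip of the two helpers above, mirroring the "foo.*.* -> foo.$1.$2" example.
func exampleTransformTokenize() {
	tokenized := transformTokenize("foo.*.*") // "foo.$1.$2"
	subj, phs := transformUntokenize(tokenized)
	fmt.Printf("%s %s %v\n", tokenized, subj, phs) // foo.$1.$2 foo.*.* [$1 $2]
}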
// RemoveMapping will remove an existing mapping.
func (a *Account) RemoveMapping(src string) bool {
a.mu.Lock()
defer a.mu.Unlock()
for i, m := range a.mappings {
if m.src == src {
// Swap last one into this spot. Its ok to change order.
a.mappings[i] = a.mappings[len(a.mappings)-1]
a.mappings[len(a.mappings)-1] = nil // gc
a.mappings = a.mappings[:len(a.mappings)-1]
return true
}
}
return false
}
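// removeMappingAt is an editor's sketch of the swap-delete idiom used above:
// order is not preserved, but removal is O(1) once the index is known, and
// nil-ing the vacated slot lets the GC reclaim the mapping.
func removeMappingAt(mappings []*mapping, i int) []*mapping {
	mappings[i] = mappings[len(mappings)-1]
	mappings[len(mappings)-1] = nil // gc
	return mappings[:len(mappings)-1]
}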
// Indicates we have mapping entries.
func (a *Account) hasMappings() bool {
if a == nil {
return false
}
a.mu.RLock()
n := len(a.mappings)
a.mu.RUnlock()
return n > 0
}
// This performs the logic to map to a new dest subject based on mappings.
// Should only be called from processInboundClientMsg or service import processing.
func (a *Account) selectMappedSubject(dest string) (string, bool) {
a.mu.RLock()
if len(a.mappings) == 0 {
a.mu.RUnlock()
return dest, false
}
// In case we have to tokenize for subset matching.
tsa := [32]string{}
tts := tsa[:0]
var m *mapping
for _, rm := range a.mappings {
if !rm.wc && rm.src == dest {
m = rm
break
} else {
// tokenize and reuse for subset matching.
if len(tts) == 0 {
start := 0
subject := dest
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tts = append(tts, subject[start:i])
start = i + 1
}
}
tts = append(tts, subject[start:])
}
if isSubsetMatch(tts, rm.src) {
m = rm
break
}
}
}
if m == nil {
a.mu.RUnlock()
return dest, false
}
// The selected destination for the mapping.
var d *destination
var ndest string
dests := m.dests
if len(m.cdests) > 0 {
cn := a.srv.cachedClusterName()
dests = m.cdests[cn]
if dests == nil {
// Fallback to main if we do not match the cluster.
dests = m.dests
}
}
// Optimize for single entry case.
if len(dests) == 1 && dests[0].weight == 100 {
d = dests[0]
} else {
w := uint8(a.prand.Int31n(100))
for _, rm := range dests {
if w < rm.weight {
d = rm
break
}
}
}
if d != nil {
if len(d.tr.dtpi) == 0 {
ndest = d.tr.dest
} else if nsubj, err := d.tr.transform(tts); err == nil {
ndest = nsubj
}
}
a.mu.RUnlock()
return ndest, true
}
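// examplePickWeightedDest is an editor's sketch of the cumulative-weight
// selection above. The destinations are assumed to be preprocessed so each
// weight is a running total (e.g. 20 then 100); a single draw in [0,100)
// then selects a destination with probability proportional to its share.
func examplePickWeightedDest(dests []*destination, prand *rand.Rand) *destination {
	w := uint8(prand.Int31n(100))
	for _, d := range dests {
		if w < d.weight {
			return d
		}
	}
	return nil
}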
// SubscriptionInterest returns true if this account has a matching subscription
// for the given `subject`. Works only for literal subjects.
// TODO: Add support for wildcards
func (a *Account) SubscriptionInterest(subject string) bool {
return a.Interest(subject) > 0
}
// Interest returns the number of subscriptions for a given subject that match.
func (a *Account) Interest(subject string) int {
var nms int
a.mu.RLock()
if a.sl != nil {
res := a.sl.Match(subject)
nms = len(res.psubs) + len(res.qsubs)
}
a.mu.RUnlock()
return nms
}
// addClient keeps our accounting of local active clients or leafnodes updated.
// Returns previous total.
func (a *Account) addClient(c *client) int {
a.mu.Lock()
n := len(a.clients)
if a.clients != nil {
a.clients[c] = struct{}{}
}
added := n != len(a.clients)
if added {
if c.kind == SYSTEM {
a.sysclients++
} else if c.kind == LEAF {
a.nleafs++
a.lleafs = append(a.lleafs, c)
}
}
a.mu.Unlock()
if c != nil && c.srv != nil && added {
c.srv.accConnsUpdate(a)
}
return n
}
// Helper function to remove leaf nodes. If the number of leafnodes gets large
// this may need to be optimized away from linear search, but we believe the
// number of active leafnodes per account to be small and therefore cache friendly.
// Lock should be held on account.
func (a *Account) removeLeafNode(c *client) {
ll := len(a.lleafs)
for i, l := range a.lleafs {
if l == c {
a.lleafs[i] = a.lleafs[ll-1]
if ll == 1 {
a.lleafs = nil
} else {
a.lleafs = a.lleafs[:ll-1]
}
return
}
}
}
// removeClient keeps our accounting of local active clients updated.
func (a *Account) removeClient(c *client) int {
a.mu.Lock()
n := len(a.clients)
delete(a.clients, c)
removed := n != len(a.clients)
if removed {
if c.kind == SYSTEM {
a.sysclients--
} else if c.kind == LEAF {
a.nleafs--
a.removeLeafNode(c)
}
}
a.mu.Unlock()
if c != nil && c.srv != nil && removed {
c.srv.mu.Lock()
doRemove := a != c.srv.gacc
c.srv.mu.Unlock()
if doRemove {
c.srv.accConnsUpdate(a)
}
}
return n
}
func (a *Account) randomClient() *client {
if a.ic != nil {
return a.ic
}
var c *client
for c = range a.clients {
break
}
return c
}
// AddServiceExport will configure the account with the defined export.
func (a *Account) AddServiceExport(subject string, accounts []*Account) error {
return a.AddServiceExportWithResponse(subject, Singleton, accounts)
}
// AddServiceExportWithResponse will configure the account with the defined export and response type.
func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error {
if a == nil {
return ErrMissingAccount
}
a.mu.Lock()
defer a.mu.Unlock()
if a.exports.services == nil {
a.exports.services = make(map[string]*serviceExport)
}
se := a.exports.services[subject]
// Always create a service export
if se == nil {
se = &serviceExport{}
}
if respType != Singleton {
se.respType = respType
}
if accounts != nil {
		// Empty means auth is required but will be handled via an import token.
if len(accounts) == 0 {
se.tokenReq = true
} else {
if se.approved == nil {
se.approved = make(map[string]*Account, len(accounts))
}
for _, acc := range accounts {
se.approved[acc.Name] = acc
}
}
}
lrt := a.lowestServiceExportResponseTime()
se.acc = a
se.respThresh = DEFAULT_SERVICE_EXPORT_RESPONSE_THRESHOLD
a.exports.services[subject] = se
if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt {
a.updateAllClientsServiceExportResponseTime(nlrt)
}
return nil
}
// TrackServiceExport will enable latency tracking of the named service.
// Results will be published in this account to the given results subject.
func (a *Account) TrackServiceExport(service, results string) error {
return a.TrackServiceExportWithSampling(service, results, DEFAULT_SERVICE_LATENCY_SAMPLING)
}
// TrackServiceExportWithSampling will enable latency tracking of the named service for the given
// sampling rate (1-100). Results will be published in this account to the given results subject.
func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error {
if a == nil {
return ErrMissingAccount
}
if sampling != 0 { // 0 means triggered by header
if sampling < 1 || sampling > 100 {
return ErrBadSampling
}
}
if !IsValidPublishSubject(results) {
return ErrBadPublishSubject
}
	// Don't loop back on ourselves.
if a.IsExportService(results) {
return ErrBadPublishSubject
}
if a.srv != nil && !a.srv.EventsEnabled() {
return ErrNoSysAccount
}
a.mu.Lock()
if a.exports.services == nil {
a.mu.Unlock()
return ErrMissingService
}
ea, ok := a.exports.services[service]
if !ok {
a.mu.Unlock()
return ErrMissingService
}
if ea == nil {
ea = &serviceExport{}
a.exports.services[service] = ea
} else if ea.respType != Singleton {
a.mu.Unlock()
return ErrBadServiceType
}
ea.latency = &serviceLatency{
sampling: int8(sampling),
subject: results,
}
s := a.srv
a.mu.Unlock()
if s == nil {
return nil
}
// Now track down the imports and add in latency as needed to enable.
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
acc.mu.Lock()
for _, im := range acc.imports.services {
if im != nil && im.acc.Name == a.Name && subjectIsSubsetMatch(im.to, service) {
im.latency = ea.latency
}
}
acc.mu.Unlock()
return true
})
return nil
}
// UnTrackServiceExport will disable latency tracking of the named service.
func (a *Account) UnTrackServiceExport(service string) {
if a == nil || (a.srv != nil && !a.srv.EventsEnabled()) {
return
}
a.mu.Lock()
if a == nil || a.exports.services == nil {
a.mu.Unlock()
return
}
ea, ok := a.exports.services[service]
if !ok || ea == nil || ea.latency == nil {
a.mu.Unlock()
return
}
// We have latency here.
ea.latency = nil
s := a.srv
a.mu.Unlock()
if s == nil {
return
}
// Now track down the imports and clean them up.
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
acc.mu.Lock()
for _, im := range acc.imports.services {
if im != nil && im.acc.Name == a.Name {
if subjectIsSubsetMatch(im.to, service) {
im.latency, im.m1 = nil, nil
}
}
}
acc.mu.Unlock()
return true
})
}
// IsExportService will indicate if this service exists. Will check wildcard scenarios.
func (a *Account) IsExportService(service string) bool {
a.mu.RLock()
defer a.mu.RUnlock()
_, ok := a.exports.services[service]
if ok {
return true
}
tokens := strings.Split(service, tsep)
for subj := range a.exports.services {
if isSubsetMatch(tokens, subj) {
return true
}
}
return false
}
// IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled.
func (a *Account) IsExportServiceTracking(service string) bool {
a.mu.RLock()
ea, ok := a.exports.services[service]
if ok && ea == nil {
a.mu.RUnlock()
return false
}
if ok && ea != nil && ea.latency != nil {
a.mu.RUnlock()
return true
}
// FIXME(dlc) - Might want to cache this is in the hot path checking for latency tracking.
tokens := strings.Split(service, tsep)
for subj, ea := range a.exports.services {
if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil {
a.mu.RUnlock()
return true
}
}
a.mu.RUnlock()
return false
}
// ServiceLatency is the JSON message sent out in response to latency tracking for
// an accounts exported services. Additional client info is available in requestor
// and responder. Note that for a requestor, the only information shared by default
// is the RTT used to calculate the total latency. The requestor's account can
// designate to share the additional information in the service import.
type ServiceLatency struct {
TypedEvent
Status int `json:"status"`
Error string `json:"description,omitempty"`
Requestor LatencyClient `json:"requestor,omitempty"`
Responder LatencyClient `json:"responder,omitempty"`
RequestHeader http.Header `json:"header,omitempty"` // only contains header(s) triggering the measurement
RequestStart time.Time `json:"start"`
ServiceLatency time.Duration `json:"service"`
SystemLatency time.Duration `json:"system"`
TotalLatency time.Duration `json:"total"`
}
// ServiceLatencyType is the NATS Event Type for ServiceLatency
const ServiceLatencyType = "io.nats.server.metric.v1.service_latency"
// LatencyClient is the JSON message structure assigned to requestors and responders.
// Note that for a requestor, the only information shared by default is the RTT used
// to calculate the total latency. The requestor's account can designate to share
// the additional information in the service import.
type LatencyClient struct {
Account string `json:"acc"`
RTT time.Duration `json:"rtt"`
Start time.Time `json:"start,omitempty"`
User string `json:"user,omitempty"`
Name string `json:"name,omitempty"`
Lang string `json:"lang,omitempty"`
Version string `json:"ver,omitempty"`
IP string `json:"ip,omitempty"`
CID uint64 `json:"cid,omitempty"`
Server string `json:"server,omitempty"`
}
// NATSTotalTime is a helper function that totals the NATS latencies.
func (nl *ServiceLatency) NATSTotalTime() time.Duration {
return nl.Requestor.RTT + nl.Responder.RTT + nl.SystemLatency
}
// Merge function to merge m1 and m2 (requestor and responder) measurements
// when there are two samples. This happens when the requestor and responder
// are on different servers.
//
// m2 ServiceLatency is correct, so use that.
// m1 TotalLatency is correct, so use that.
// Will use those to back into NATS latency.
func (m1 *ServiceLatency) merge(m2 *ServiceLatency) {
m1.SystemLatency = m1.ServiceLatency - (m2.ServiceLatency + m2.Responder.RTT)
m1.ServiceLatency = m2.ServiceLatency
m1.Responder = m2.Responder
sanitizeLatencyMetric(m1)
}
// sanitizeLatencyMetric adjusts latency metric values that could go
// negative in some edge conditions since we estimate client RTT
// for both requestor and responder.
// These numbers are never meant to be negative, it just could be
// how we back into the values based on estimated RTT.
func sanitizeLatencyMetric(sl *ServiceLatency) {
if sl.ServiceLatency < 0 {
sl.ServiceLatency = 0
}
if sl.SystemLatency < 0 {
sl.SystemLatency = 0
}
}
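// exampleMergeArithmetic is an editor's worked example (illustrative numbers)
// of the merge above: the requestor-side service time still includes the
// remote hop, so subtracting the responder-side service time and the
// responder RTT backs into the NATS system latency.
func exampleMergeArithmetic() time.Duration {
	m1Service := 30 * time.Millisecond // requestor-side estimate (incl. remote hop)
	m2Service := 20 * time.Millisecond // responder-side true service time
	m2RespRTT := 4 * time.Millisecond  // responder client RTT
	return m1Service - (m2Service + m2RespRTT) // 6ms of system (NATS) latency
}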
// Used for transporting remote latency measurements.
type remoteLatency struct {
Account string `json:"account"`
ReqId string `json:"req_id"`
M2 ServiceLatency `json:"m2"`
respThresh time.Duration
}
// sendLatencyResult will send a latency result and clear the si of the requestor(rc).
func (a *Account) sendLatencyResult(si *serviceImport, sl *ServiceLatency) {
sl.Type = ServiceLatencyType
sl.ID = a.nextEventID()
sl.Time = time.Now().UTC()
a.mu.Lock()
lsubj := si.latency.subject
si.rc = nil
a.mu.Unlock()
a.srv.sendInternalAccountMsg(a, lsubj, sl)
}
// Used to send a bad request metric when we do not have a reply subject
func (a *Account) sendBadRequestTrackingLatency(si *serviceImport, requestor *client, header http.Header) {
sl := &ServiceLatency{
Status: 400,
Error: "Bad Request",
Requestor: requestor.getClientInfo(si.share),
}
sl.RequestHeader = header
sl.RequestStart = time.Now().Add(-sl.Requestor.RTT).UTC()
a.sendLatencyResult(si, sl)
}
// Used to send a latency result when the requestor interest was lost before the
// response could be delivered.
func (a *Account) sendReplyInterestLostTrackLatency(si *serviceImport) {
sl := &ServiceLatency{
Status: 408,
Error: "Request Timeout",
}
a.mu.RLock()
rc := si.rc
share := si.share
ts := si.ts
sl.RequestHeader = si.trackingHdr
a.mu.RUnlock()
if rc != nil {
sl.Requestor = rc.getClientInfo(share)
}
sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC()
a.sendLatencyResult(si, sl)
}
func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiReason) {
sl := &ServiceLatency{}
a.mu.RLock()
rc := si.rc
share := si.share
ts := si.ts
sl.RequestHeader = si.trackingHdr
a.mu.RUnlock()
if rc != nil {
sl.Requestor = rc.getClientInfo(share)
}
sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC()
if reason == rsiNoDelivery {
sl.Status = 503
sl.Error = "Service Unavailable"
} else if reason == rsiTimeout {
sl.Status = 504
sl.Error = "Service Timeout"
}
a.sendLatencyResult(si, sl)
}
// sendTrackingLatency will send out the appropriate tracking information for the
// service request/response latency. This is called when the requestor's server has
// received the response.
// TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit.
func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool {
if si.rc == nil {
return true
}
ts := time.Now()
serviceRTT := time.Duration(ts.UnixNano() - si.ts)
requestor := si.rc
sl := &ServiceLatency{
Status: 200,
Requestor: requestor.getClientInfo(si.share),
Responder: responder.getClientInfo(true),
}
sl.RequestStart = time.Unix(0, si.ts-int64(sl.Requestor.RTT)).UTC()
sl.ServiceLatency = serviceRTT - sl.Responder.RTT
sl.TotalLatency = sl.Requestor.RTT + serviceRTT
if sl.Responder.RTT > 0 {
sl.SystemLatency = time.Since(ts)
sl.TotalLatency += sl.SystemLatency
}
sl.RequestHeader = si.trackingHdr
sanitizeLatencyMetric(sl)
sl.Type = ServiceLatencyType
sl.ID = a.nextEventID()
sl.Time = time.Now().UTC()
// If we are expecting a remote measurement, store our sl here.
// We need to account for the race between this and us receiving the
// remote measurement.
// FIXME(dlc) - We need to clean these up but this should happen
// already with the auto-expire logic.
if responder != nil && responder.kind != CLIENT {
si.acc.mu.Lock()
if si.m1 != nil {
m1, m2 := sl, si.m1
m1.merge(m2)
si.acc.mu.Unlock()
a.srv.sendInternalAccountMsg(a, si.latency.subject, m1)
a.mu.Lock()
si.rc = nil
a.mu.Unlock()
return true
}
si.m1 = sl
si.acc.mu.Unlock()
return false
} else {
a.srv.sendInternalAccountMsg(a, si.latency.subject, sl)
a.mu.Lock()
si.rc = nil
a.mu.Unlock()
}
return true
}
// This will check to make sure our response lower threshold is set
// properly in any clients doing rrTracking.
// Lock should be held.
func (a *Account) updateAllClientsServiceExportResponseTime(lrt time.Duration) {
for c := range a.clients {
c.mu.Lock()
if c.rrTracking != nil && lrt != c.rrTracking.lrt {
c.rrTracking.lrt = lrt
if c.rrTracking.ptmr.Stop() {
c.rrTracking.ptmr.Reset(lrt)
}
}
c.mu.Unlock()
}
}
// Will select the lowest respThresh from all service exports.
// Read lock should be held.
func (a *Account) lowestServiceExportResponseTime() time.Duration {
	// Start at 5 minutes; this is the upper bound returned by this function.
lrt := time.Duration(5 * time.Minute)
for _, se := range a.exports.services {
if se.respThresh < lrt {
lrt = se.respThresh
}
}
return lrt
}
// AddServiceImportWithClaim will add in the service import via the jwt claim.
func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error {
if destination == nil {
return ErrMissingAccount
}
// Empty means use from.
if to == "" {
to = from
}
if !IsValidSubject(from) || !IsValidSubject(to) {
return ErrInvalidSubject
}
// First check to see if the account has authorized us to route to the "to" subject.
if !destination.checkServiceImportAuthorized(a, to, imClaim) {
return ErrServiceImportAuthorization
}
// Check if this introduces a cycle before proceeding.
if err := a.serviceImportFormsCycle(destination, from); err != nil {
return err
}
_, err := a.addServiceImport(destination, from, to, imClaim)
return err
}
const MaxAccountCycleSearchDepth = 1024
func (a *Account) serviceImportFormsCycle(dest *Account, from string) error {
return dest.checkServiceImportsForCycles(from, map[string]bool{a.Name: true})
}
func (a *Account) checkServiceImportsForCycles(from string, visited map[string]bool) error {
if len(visited) >= MaxAccountCycleSearchDepth {
return ErrCycleSearchDepth
}
a.mu.RLock()
for _, si := range a.imports.services {
if SubjectsCollide(from, si.to) {
a.mu.RUnlock()
if visited[si.acc.Name] {
return ErrImportFormsCycle
}
// Push ourselves and check si.acc
visited[a.Name] = true
if subjectIsSubsetMatch(si.from, from) {
from = si.from
}
if err := si.acc.checkServiceImportsForCycles(from, visited); err != nil {
return err
}
a.mu.RLock()
}
}
a.mu.RUnlock()
return nil
}
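// exampleServiceImportCycle is an illustrative sketch, not part of the
// server logic, showing the cycle detection above at work. Once A imports
// "help" from B, a reciprocal import by B from A on a colliding subject
// would complete the loop A -> B -> A and is rejected. The account names
// and the subject are hypothetical.
func exampleServiceImportCycle() {
	accA, accB := NewAccount("A"), NewAccount("B")
	_ = accA.AddServiceExport("help", nil)
	_ = accB.AddServiceExport("help", nil)
	// First import is fine: A routes "help" to B.
	_ = accA.AddServiceImport(accB, "help", "help")
	// Completing the loop is detected and refused.
	if err := accB.AddServiceImport(accA, "help", "help"); err == ErrImportFormsCycle {
		fmt.Println("cycle detected:", err)
	}
}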
func (a *Account) streamImportFormsCycle(dest *Account, to string) error {
return dest.checkStreamImportsForCycles(to, map[string]bool{a.Name: true})
}
// Lock should be held.
func (a *Account) hasStreamExportMatching(to string) bool {
for subj := range a.exports.streams {
if subjectIsSubsetMatch(to, subj) {
return true
}
}
return false
}
func (a *Account) checkStreamImportsForCycles(to string, visited map[string]bool) error {
if len(visited) >= MaxAccountCycleSearchDepth {
return ErrCycleSearchDepth
}
a.mu.RLock()
if !a.hasStreamExportMatching(to) {
a.mu.RUnlock()
return nil
}
for _, si := range a.imports.streams {
if SubjectsCollide(to, si.to) {
a.mu.RUnlock()
if visited[si.acc.Name] {
return ErrImportFormsCycle
}
// Push ourselves and check si.acc
visited[a.Name] = true
if subjectIsSubsetMatch(si.to, to) {
to = si.to
}
if err := si.acc.checkStreamImportsForCycles(to, visited); err != nil {
return err
}
a.mu.RLock()
}
}
a.mu.RUnlock()
return nil
}
// SetServiceImportSharing will allow sharing of information about requests with the export account.
// Used for service latency tracking at the moment.
func (a *Account) SetServiceImportSharing(destination *Account, to string, allow bool) error {
a.mu.Lock()
defer a.mu.Unlock()
if a.isClaimAccount() {
return fmt.Errorf("claim based accounts can not be updated directly")
}
for _, si := range a.imports.services {
if si.acc == destination && si.to == to {
si.share = allow
return nil
}
}
return fmt.Errorf("service import not found")
}
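// exampleImportSharing is an illustrative sketch, not part of the server
// logic: after an import has been added, the importing account can opt
// into sharing request information with the exporter so latency metrics
// include client details. The accounts and subject are hypothetical.
func exampleImportSharing(reqAcc, svcAcc *Account) error {
	if err := reqAcc.AddServiceImport(svcAcc, "help", "help"); err != nil {
		return err
	}
	return reqAcc.SetServiceImportSharing(svcAcc, "help", true)
}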
// AddServiceImport will add a route to an account to send published messages / requests
// to the destination account. From is the local subject to map, To is the
// subject that will appear on the destination account. Destination will need
// to have a matching service export to allow access.
func (a *Account) AddServiceImport(destination *Account, from, to string) error {
return a.AddServiceImportWithClaim(destination, from, to, nil)
}
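// exampleServiceImportMapping is an illustrative sketch, not part of the
// server logic, of the from/to mapping above: requests published locally
// on "db.lookup" in REQ are routed to "internal.db.lookup" in SVC. The
// accounts and subjects are hypothetical.
func exampleServiceImportMapping() {
	reqAcc, svcAcc := NewAccount("REQ"), NewAccount("SVC")
	_ = svcAcc.AddServiceExport("internal.db.lookup", nil)
	if err := reqAcc.AddServiceImport(svcAcc, "db.lookup", "internal.db.lookup"); err != nil {
		fmt.Printf("service import failed: %v\n", err)
	}
}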
// NumPendingReverseResponses returns the number of response mappings we have for all outstanding
// requests for service imports.
func (a *Account) NumPendingReverseResponses() int {
a.mu.RLock()
defer a.mu.RUnlock()
return len(a.imports.rrMap)
}
// NumPendingAllResponses returns the number of all responses outstanding for service exports.
func (a *Account) NumPendingAllResponses() int {
return a.NumPendingResponses("")
}
// NumPendingResponses returns the number of responses outstanding for service exports
// on this account. An empty filter string returns all responses regardless of which export.
// If you specify the filter we will only return ones that are for that export.
// NOTE this is only for what this server is tracking.
func (a *Account) NumPendingResponses(filter string) int {
a.mu.RLock()
defer a.mu.RUnlock()
if filter == "" {
return len(a.exports.responses)
}
se := a.getServiceExport(filter)
if se == nil {
return 0
}
var nre int
for _, si := range a.exports.responses {
if si.se == se {
nre++
}
}
return nre
}
// NumServiceImports returns the number of service imports we have configured.
func (a *Account) NumServiceImports() int {
a.mu.RLock()
defer a.mu.RUnlock()
return len(a.imports.services)
}
// Reason why we are removing this response serviceImport.
type rsiReason int
const (
rsiOk = rsiReason(iota)
rsiNoDelivery
rsiTimeout
)
// removeRespServiceImport removes a response si mapping and the reverse entries for interest detection.
func (a *Account) removeRespServiceImport(si *serviceImport, reason rsiReason) {
if si == nil {
return
}
a.mu.Lock()
delete(a.exports.responses, si.from)
dest := si.acc
to := si.to
tracking := si.tracking
rc := si.rc
a.mu.Unlock()
if tracking && rc != nil {
a.sendBackendErrorTrackingLatency(si, reason)
}
dest.checkForReverseEntry(to, si, false)
}
// removeServiceImport will remove the route by subject.
func (a *Account) removeServiceImport(subject string) {
a.mu.Lock()
si, ok := a.imports.services[subject]
delete(a.imports.services, subject)
var sid []byte
c := a.ic
if ok && si != nil {
if a.ic != nil && si.sid != nil {
sid = si.sid
}
}
a.mu.Unlock()
if sid != nil {
c.processUnsub(sid)
}
}
// This tracks response mappings for service requests. This is used for cleanup.
func (a *Account) addReverseRespMapEntry(acc *Account, reply, from string) {
a.mu.Lock()
if a.imports.rrMap == nil {
a.imports.rrMap = make(map[string][]*serviceRespEntry)
}
sre := &serviceRespEntry{acc, from}
sra := a.imports.rrMap[reply]
a.imports.rrMap[reply] = append(sra, sre)
a.mu.Unlock()
}
// checkForReverseEntries is for when we are trying to match reverse entries to a wildcard.
// This will be called from checkForReverseEntry when the reply arg is a wildcard subject.
// This will usually be called in a go routine since we need to walk all the entries.
func (a *Account) checkForReverseEntries(reply string, checkInterest bool) {
a.mu.RLock()
if len(a.imports.rrMap) == 0 {
a.mu.RUnlock()
return
}
if subjectIsLiteral(reply) {
a.mu.RUnlock()
a.checkForReverseEntry(reply, nil, checkInterest)
return
}
var _rs [32]string
rs := _rs[:0]
for k := range a.imports.rrMap {
if subjectIsSubsetMatch(k, reply) {
rs = append(rs, k)
}
}
a.mu.RUnlock()
for _, reply := range rs {
a.checkForReverseEntry(reply, nil, checkInterest)
}
}
// This checks for any response map entries. If you specify an si we will only match and
// clean up for that one, otherwise we remove them all.
func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInterest bool) {
a.mu.RLock()
if len(a.imports.rrMap) == 0 {
a.mu.RUnlock()
return
}
if subjectHasWildcard(reply) {
a.mu.RUnlock()
go a.checkForReverseEntries(reply, checkInterest)
return
}
sres := a.imports.rrMap[reply]
if sres == nil {
a.mu.RUnlock()
return
}
// If we are here we have an entry we should check.
// If requested we will first check if there is any
// interest for this subject for the entire account.
// If there is we can not delete any entries yet.
// Note that if we are here reply has to be a literal subject.
if checkInterest {
// If interest still exists we can not clean these up yet.
if rr := a.sl.Match(reply); len(rr.psubs)+len(rr.qsubs) > 0 {
a.mu.RUnlock()
return
}
}
a.mu.RUnlock()
// Delete the appropriate entries here based on optional si.
a.mu.Lock()
if si == nil {
delete(a.imports.rrMap, reply)
} else {
// Find the one we are looking for..
for i, sre := range sres {
if sre.msub == si.from {
sres = append(sres[:i], sres[i+1:]...)
break
}
}
if len(sres) > 0 {
a.imports.rrMap[si.to] = sres
} else {
delete(a.imports.rrMap, si.to)
}
}
a.mu.Unlock()
// If we are here we no longer have interest and we have
// response entries that we should clean up.
if si == nil {
for _, sre := range sres {
acc := sre.acc
var trackingCleanup bool
var rsi *serviceImport
acc.mu.Lock()
if rsi = acc.exports.responses[sre.msub]; rsi != nil && !rsi.didDeliver {
delete(acc.exports.responses, rsi.from)
trackingCleanup = rsi.tracking && rsi.rc != nil
}
acc.mu.Unlock()
if trackingCleanup {
acc.sendReplyInterestLostTrackLatency(rsi)
}
}
}
}
// Internal check to see if a service import exists.
func (a *Account) serviceImportExists(dest *Account, from string) bool {
a.mu.RLock()
dup := a.imports.services[from]
a.mu.RUnlock()
return dup != nil
}
// Add a service import.
// This does no checks and should only be called by the msg processing code. Use
// AddServiceImport from above if responding to user input or config changes, etc.
func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) (*serviceImport, error) {
rt := Singleton
var lat *serviceLatency
dest.mu.RLock()
se := dest.getServiceExport(to)
if se != nil {
rt = se.respType
lat = se.latency
}
s := dest.srv
dest.mu.RUnlock()
// Track if this maps us to the system account.
var isSysAcc bool
if s != nil {
s.mu.Lock()
if s.sys != nil && dest == s.sys.account {
isSysAcc = true
}
s.mu.Unlock()
}
a.mu.Lock()
if a.imports.services == nil {
a.imports.services = make(map[string]*serviceImport)
} else if dup := a.imports.services[from]; dup != nil {
a.mu.Unlock()
return nil, fmt.Errorf("duplicate service import subject %q, previously used in import for account %q, subject %q",
from, dup.acc.Name, dup.to)
}
if to == "" {
to = from
}
// Check to see if we have a wildcard
var (
usePub bool
tr *transform
err error
)
if subjectHasWildcard(to) {
// If to and from match, then we use the published subject.
if to == from {
usePub = true
} else {
from, _ = transformUntokenize(from)
// Create a transform
if tr, err = newTransform(from, transformTokenize(to)); err != nil {
a.mu.Unlock()
return nil, fmt.Errorf("failed to create mapping transform for service import subject %q to %q: %v",
from, to, err)
}
}
}
share := false
if claim != nil {
share = claim.Share
}
si := &serviceImport{dest, claim, se, nil, from, to, tr, 0, rt, lat, nil, nil, usePub, false, false, share, false, false, isSysAcc, nil}
a.imports.services[from] = si
a.mu.Unlock()
if err := a.addServiceImportSub(si); err != nil {
a.removeServiceImport(si.from)
return nil, err
}
return si, nil
}
// Returns the internal client, will create one if not present.
// Lock should be held.
func (a *Account) internalClient() *client {
if a.ic == nil && a.srv != nil {
a.ic = a.srv.createInternalAccountClient()
a.ic.acc = a
}
return a.ic
}
// Internal account scoped subscriptions.
func (a *Account) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
a.mu.Lock()
c := a.internalClient()
a.isid++
sid := strconv.FormatUint(a.isid, 10)
a.mu.Unlock()
	// This will happen in parsing when the account has not been properly set up.
if c == nil {
return nil, fmt.Errorf("no internal account client")
}
return c.processSub([]byte(subject), nil, []byte(sid), cb, false)
}
// This will add an account subscription that matches the "from" from a service import entry.
func (a *Account) addServiceImportSub(si *serviceImport) error {
a.mu.Lock()
c := a.internalClient()
	// This will happen in parsing when the account has not been properly set up.
if c == nil {
a.mu.Unlock()
return nil
}
if si.sid != nil {
a.mu.Unlock()
return fmt.Errorf("duplicate call to create subscription for service import")
}
a.isid++
sid := strconv.FormatUint(a.isid, 10)
si.sid = []byte(sid)
subject := si.from
a.mu.Unlock()
cb := func(sub *subscription, c *client, subject, reply string, msg []byte) {
c.processServiceImport(si, a, msg)
}
_, err := c.processSub([]byte(subject), nil, []byte(sid), cb, true)
return err
}
// Remove all the subscriptions associated with service imports.
func (a *Account) removeAllServiceImportSubs() {
a.mu.RLock()
var sids [][]byte
for _, si := range a.imports.services {
if si.sid != nil {
sids = append(sids, si.sid)
si.sid = nil
}
}
c := a.ic
a.ic = nil
a.mu.RUnlock()
if c == nil {
return
}
for _, sid := range sids {
c.processUnsub(sid)
}
c.closeConnection(InternalClient)
}
// Add in subscriptions for all registered service imports.
func (a *Account) addAllServiceImportSubs() {
for _, si := range a.imports.services {
a.addServiceImportSub(si)
}
}
var (
// header where all information is encoded in one value.
trcUber = textproto.CanonicalMIMEHeaderKey("Uber-Trace-Id")
trcCtx = textproto.CanonicalMIMEHeaderKey("Traceparent")
trcB3 = textproto.CanonicalMIMEHeaderKey("B3")
// openzipkin header to check
trcB3Sm = textproto.CanonicalMIMEHeaderKey("X-B3-Sampled")
trcB3Id = textproto.CanonicalMIMEHeaderKey("X-B3-TraceId")
// additional header needed to include when present
trcB3PSId = textproto.CanonicalMIMEHeaderKey("X-B3-ParentSpanId")
trcB3SId = textproto.CanonicalMIMEHeaderKey("X-B3-SpanId")
trcCtxSt = textproto.CanonicalMIMEHeaderKey("Tracestate")
trcUberCtxPrefix = textproto.CanonicalMIMEHeaderKey("Uberctx-")
)
func newB3Header(h http.Header) http.Header {
retHdr := http.Header{}
if v, ok := h[trcB3Sm]; ok {
retHdr[trcB3Sm] = v
}
if v, ok := h[trcB3Id]; ok {
retHdr[trcB3Id] = v
}
if v, ok := h[trcB3PSId]; ok {
retHdr[trcB3PSId] = v
}
if v, ok := h[trcB3SId]; ok {
retHdr[trcB3SId] = v
}
return retHdr
}
func newUberHeader(h http.Header, tId []string) http.Header {
retHdr := http.Header{trcUber: tId}
for k, v := range h {
if strings.HasPrefix(k, trcUberCtxPrefix) {
retHdr[k] = v
}
}
return retHdr
}
func newTraceCtxHeader(h http.Header, tId []string) http.Header {
retHdr := http.Header{trcCtx: tId}
if v, ok := h[trcCtxSt]; ok {
retHdr[trcCtxSt] = v
}
return retHdr
}
// Helper to determine when to sample. When the header has a value, sampling is driven by the header.
func shouldSample(l *serviceLatency, c *client) (bool, http.Header) {
if l == nil {
return false, nil
}
if l.sampling < 0 {
return false, nil
}
if l.sampling >= 100 {
return true, nil
}
if l.sampling > 0 && rand.Int31n(100) <= int32(l.sampling) {
return true, nil
}
h := c.parseState.getHeader()
if len(h) == 0 {
return false, nil
}
if tId := h[trcUber]; len(tId) != 0 {
// sample 479fefe9525eddb:5adb976bfc1f95c1:479fefe9525eddb:1
tk := strings.Split(tId[0], ":")
if len(tk) == 4 && len(tk[3]) > 0 && len(tk[3]) <= 2 {
dst := [2]byte{}
src := [2]byte{'0', tk[3][0]}
if len(tk[3]) == 2 {
src[1] = tk[3][1]
}
if _, err := hex.Decode(dst[:], src[:]); err == nil && dst[0]&1 == 1 {
return true, newUberHeader(h, tId)
}
}
return false, nil
} else if sampled := h[trcB3Sm]; len(sampled) != 0 && sampled[0] == "1" {
return true, newB3Header(h) // allowed
} else if len(sampled) != 0 && sampled[0] == "0" {
return false, nil // denied
} else if _, ok := h[trcB3Id]; ok {
// sample 80f198ee56343ba864fe8b2a57d3eff7
// presence (with X-B3-Sampled not being 0) means sampling left to recipient
return true, newB3Header(h)
} else if b3 := h[trcB3]; len(b3) != 0 {
// sample 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90
// sample 0
tk := strings.Split(b3[0], "-")
if len(tk) > 2 && tk[2] == "0" {
return false, nil // denied
} else if len(tk) == 1 && tk[0] == "0" {
return false, nil // denied
}
return true, http.Header{trcB3: b3} // sampling allowed or left to recipient of header
} else if tId := h[trcCtx]; len(tId) != 0 {
// sample 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
tk := strings.Split(tId[0], "-")
		if len(tk) == 4 && tk[3] == "01" {
return true, newTraceCtxHeader(h, tId)
} else {
return false, nil
}
}
return false, nil
}
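// exampleTraceHeaderPropagation is an illustrative sketch, not part of the
// server logic, of the header trimming used for sampled requests: only the
// recognized tracing keys survive, everything else is dropped. The header
// values are hypothetical.
func exampleTraceHeaderPropagation() {
	h := http.Header{
		trcB3Sm:        []string{"1"},
		trcB3Id:        []string{"80f198ee56343ba864fe8b2a57d3eff7"},
		trcB3SId:       []string{"e457b5a2e4d86bd1"},
		"Content-Type": []string{"application/json"},
	}
	// With X-B3-Sampled set to "1", shouldSample reports true and the
	// propagated header set keeps only the X-B3-* keys.
	fmt.Printf("propagated: %v\n", newB3Header(h))
}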
// Used to mimic client like replies.
const (
replyPrefix = "_R_."
trackSuffix = ".T"
replyPrefixLen = len(replyPrefix)
baseServerLen = 10
replyLen = 6
minReplyLen = 15
digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
base = 62
)
// This is where all service export responses are handled.
func (a *Account) processServiceImportResponse(sub *subscription, c *client, subject, reply string, msg []byte) {
a.mu.RLock()
if a.expired || len(a.exports.responses) == 0 {
a.mu.RUnlock()
return
}
si := a.exports.responses[subject]
if si == nil || si.invalid {
a.mu.RUnlock()
return
}
a.mu.RUnlock()
// Send for normal processing.
c.processServiceImport(si, a, msg)
}
// Will create a wildcard subscription to handle interest graph propagation for all
// service replies.
// Lock should not be held.
func (a *Account) createRespWildcard() []byte {
a.mu.Lock()
if a.prand == nil {
a.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
var b = [baseServerLen]byte{'_', 'R', '_', '.'}
rn := a.prand.Int63()
for i, l := replyPrefixLen, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
a.siReply = append(b[:], '.')
pre := a.siReply
wcsub := append(a.siReply, '>')
c := a.internalClient()
a.isid++
sid := strconv.FormatUint(a.isid, 10)
a.mu.Unlock()
// Create subscription and internal callback for all the wildcard response subjects.
c.processSub(wcsub, nil, []byte(sid), a.processServiceImportResponse, false)
return pre
}
// Test whether this is a tracked reply.
func isTrackedReply(reply []byte) bool {
lreply := len(reply) - 1
return lreply > 3 && reply[lreply-1] == '.' && reply[lreply] == 'T'
}
// Generate a new service reply from the wildcard prefix.
// FIXME(dlc) - probably do not have to use rand here. about 25ns per.
func (a *Account) newServiceReply(tracking bool) []byte {
a.mu.RLock()
replyPre := a.siReply
s := a.srv
a.mu.RUnlock()
if replyPre == nil {
replyPre = a.createRespWildcard()
}
var b [replyLen]byte
rn := a.prand.Int63()
for i, l := 0, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
// Make sure to copy.
reply := make([]byte, 0, len(replyPre)+len(b))
reply = append(reply, replyPre...)
reply = append(reply, b[:]...)
if tracking && s.sys != nil {
// Add in our tracking identifier. This allows the metrics to get back to only
// this server without needless SUBS/UNSUBS.
reply = append(reply, '.')
reply = append(reply, s.sys.shash...)
reply = append(reply, '.', 'T')
}
return reply
}
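// exampleTrackedReplyShape is an illustrative sketch, not part of the
// server logic, of the reply subjects produced above. The base62 tokens
// shown are hypothetical placeholders.
func exampleTrackedReplyShape() {
	untracked := []byte("_R_.6AYOQy.Ab12Cd")
	tracked := []byte("_R_.6AYOQy.Ab12Cd.K9xV2pQe.T")
	fmt.Println(isTrackedReply(untracked)) // false
	fmt.Println(isTrackedReply(tracked))   // true
}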
// Checks if a serviceImport was created to map responses.
func (si *serviceImport) isRespServiceImport() bool {
return si != nil && si.response
}
// Sets the response threshold timer for a service export.
// Account lock should be held
func (se *serviceExport) setResponseThresholdTimer() {
if se.rtmr != nil {
return // Already set
}
se.rtmr = time.AfterFunc(se.respThresh, se.checkExpiredResponses)
}
// Account lock should be held
func (se *serviceExport) clearResponseThresholdTimer() bool {
if se.rtmr == nil {
return true
}
stopped := se.rtmr.Stop()
se.rtmr = nil
return stopped
}
// checkExpiredResponses will check for any pending responses that need to
// be cleaned up.
func (se *serviceExport) checkExpiredResponses() {
	acc := se.acc
	if acc == nil {
		// No account means there is no lock to take; just make sure the
		// timer is cleared. Dereferencing a nil account here would panic.
		se.clearResponseThresholdTimer()
		return
	}
var expired []*serviceImport
mints := time.Now().UnixNano() - int64(se.respThresh)
// TODO(dlc) - Should we release lock while doing this? Or only do these in batches?
// Should we break this up for responses only from this service export?
	// Responses live on acc directly for fast inbound processing for the _R_ wildcard.
// We could do another indirection at this level but just to get to the service export?
var totalResponses int
acc.mu.RLock()
for _, si := range acc.exports.responses {
if si.se == se {
totalResponses++
if si.ts <= mints {
expired = append(expired, si)
}
}
}
acc.mu.RUnlock()
for _, si := range expired {
acc.removeRespServiceImport(si, rsiTimeout)
}
// Pull out expired to determine if we have any left for timer.
totalResponses -= len(expired)
// Redo timer as needed.
acc.mu.Lock()
if totalResponses > 0 && se.rtmr != nil {
se.rtmr.Stop()
se.rtmr.Reset(se.respThresh)
} else {
se.clearResponseThresholdTimer()
}
acc.mu.Unlock()
}
// ServiceExportResponseThreshold returns the current threshold.
func (a *Account) ServiceExportResponseThreshold(export string) (time.Duration, error) {
a.mu.Lock()
defer a.mu.Unlock()
se := a.getServiceExport(export)
if se == nil {
return 0, fmt.Errorf("no export defined for %q", export)
}
return se.respThresh, nil
}
// SetServiceExportResponseThreshold sets the maximum time the system will wait for a response
// to be delivered from a service export responder.
func (a *Account) SetServiceExportResponseThreshold(export string, maxTime time.Duration) error {
a.mu.Lock()
defer a.mu.Unlock()
if a.isClaimAccount() {
return fmt.Errorf("claim based accounts can not be updated directly")
}
lrt := a.lowestServiceExportResponseTime()
se := a.getServiceExport(export)
if se == nil {
return fmt.Errorf("no export defined for %q", export)
}
se.respThresh = maxTime
if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt {
a.updateAllClientsServiceExportResponseTime(nlrt)
}
return nil
}
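// exampleResponseThreshold is an illustrative sketch, not part of the
// server logic: tighten the response threshold for a hypothetical export
// and read it back.
func exampleResponseThreshold() {
	acc := NewAccount("SVC")
	_ = acc.AddServiceExport("help", nil)
	if err := acc.SetServiceExportResponseThreshold("help", 10*time.Second); err != nil {
		fmt.Printf("set threshold: %v\n", err)
		return
	}
	if d, err := acc.ServiceExportResponseThreshold("help"); err == nil {
		fmt.Println("threshold is now", d) // 10s
	}
}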
// This is for internal service import responses.
func (a *Account) addRespServiceImport(dest *Account, to string, osi *serviceImport, tracking bool, header http.Header) *serviceImport {
nrr := string(osi.acc.newServiceReply(tracking))
a.mu.Lock()
rt := osi.rt
// dest is the requestor's account. a is the service responder with the export.
// Marked as internal here, that is how we distinguish.
si := &serviceImport{dest, nil, osi.se, nil, nrr, to, nil, 0, rt, nil, nil, nil, false, true, false, osi.share, false, false, false, nil}
if a.exports.responses == nil {
a.exports.responses = make(map[string]*serviceImport)
}
a.exports.responses[nrr] = si
// Always grab time and make sure response threshold timer is running.
si.ts = time.Now().UnixNano()
osi.se.setResponseThresholdTimer()
if rt == Singleton && tracking {
si.latency = osi.latency
si.tracking = true
si.trackingHdr = header
}
a.mu.Unlock()
// We do not do individual subscriptions here like we do on configured imports.
// We have an internal callback for all responses inbound to this account and
// will process appropriately there. This does not pollute the sublist and the caches.
// We do add in the reverse map such that we can detect loss of interest and do proper
// cleanup of this si as interest goes away.
dest.addReverseRespMapEntry(a, to, nrr)
return si
}
// AddStreamImportWithClaim will add in the stream import from a specific account with optional token.
func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error {
if account == nil {
return ErrMissingAccount
}
// First check to see if the account has authorized export of the subject.
if !account.checkStreamImportAuthorized(a, from, imClaim) {
return ErrStreamImportAuthorization
}
// Check prefix if it exists and make sure its a literal.
// Append token separator if not already present.
if prefix != "" {
// Make sure there are no wildcards here, this prefix needs to be a literal
// since it will be prepended to a publish subject.
if !subjectIsLiteral(prefix) {
return ErrStreamImportBadPrefix
}
if prefix[len(prefix)-1] != btsep {
prefix = prefix + string(btsep)
}
}
return a.AddMappedStreamImportWithClaim(account, from, prefix+from, imClaim)
}
// AddMappedStreamImport helper for AddMappedStreamImportWithClaim
func (a *Account) AddMappedStreamImport(account *Account, from, to string) error {
return a.AddMappedStreamImportWithClaim(account, from, to, nil)
}
// AddMappedStreamImportWithClaim will add in the stream import from a specific account with optional token.
func (a *Account) AddMappedStreamImportWithClaim(account *Account, from, to string, imClaim *jwt.Import) error {
if account == nil {
return ErrMissingAccount
}
// First check to see if the account has authorized export of the subject.
if !account.checkStreamImportAuthorized(a, from, imClaim) {
return ErrStreamImportAuthorization
}
if to == "" {
to = from
}
// Check if this forms a cycle.
if err := a.streamImportFormsCycle(account, to); err != nil {
return err
}
var (
usePub bool
tr *transform
err error
)
if subjectHasWildcard(from) {
if to == from {
usePub = true
} else {
// Create a transform
if tr, err = newTransform(from, transformTokenize(to)); err != nil {
return fmt.Errorf("failed to create mapping transform for stream import subject %q to %q: %v",
from, to, err)
}
}
}
a.mu.Lock()
if a.isStreamImportDuplicate(account, from) {
a.mu.Unlock()
return ErrStreamImportDuplicate
}
a.imports.streams = append(a.imports.streams, &streamImport{account, from, to, tr, nil, imClaim, usePub, false})
a.mu.Unlock()
return nil
}
// isStreamImportDuplicate checks for a duplicate stream import.
// Lock should be held.
func (a *Account) isStreamImportDuplicate(acc *Account, from string) bool {
for _, si := range a.imports.streams {
if si.acc == acc && si.from == from {
return true
}
}
return false
}
// AddStreamImport will add in the stream import from a specific account.
func (a *Account) AddStreamImport(account *Account, from, prefix string) error {
return a.AddStreamImportWithClaim(account, from, prefix, nil)
}
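// exampleStreamImport is an illustrative sketch, not part of the server
// logic: EXP exports a stream publicly and IMP imports it under a literal
// prefix, so a message published on "metrics.cpu" in EXP shows up as
// "from.exp.metrics.cpu" in IMP. Accounts and subjects are hypothetical.
func exampleStreamImport() {
	exp, imp := NewAccount("EXP"), NewAccount("IMP")
	_ = exp.AddStreamExport("metrics.>", nil)
	if err := imp.AddStreamImport(exp, "metrics.>", "from.exp"); err != nil {
		fmt.Printf("stream import failed: %v\n", err)
	}
}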
// IsPublicExport is a placeholder to denote a public export.
var IsPublicExport = []*Account(nil)
// AddStreamExport will add an export to the account. If accounts is nil
// it will signify a public export, meaning anyone can import.
func (a *Account) AddStreamExport(subject string, accounts []*Account) error {
if a == nil {
return ErrMissingAccount
}
a.mu.Lock()
defer a.mu.Unlock()
if a.exports.streams == nil {
a.exports.streams = make(map[string]*streamExport)
}
ea := a.exports.streams[subject]
if accounts != nil {
if ea == nil {
ea = &streamExport{}
}
// empty means auth required but will be import token.
if len(accounts) == 0 {
ea.tokenReq = true
} else {
if ea.approved == nil {
ea.approved = make(map[string]*Account, len(accounts))
}
for _, acc := range accounts {
ea.approved[acc.Name] = acc
}
}
}
a.exports.streams[subject] = ea
return nil
}
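// exampleStreamExportModes is an illustrative sketch, not part of the
// server logic, of the three authorization modes above: nil accounts means
// public, an empty non-nil slice requires an activation token, and a
// populated slice approves specific accounts. Subjects are hypothetical.
func exampleStreamExportModes() {
	exp, imp := NewAccount("EXP"), NewAccount("IMP")
	_ = exp.AddStreamExport("public.>", nil)             // anyone can import
	_ = exp.AddStreamExport("tokens.>", []*Account{})    // activation token required
	_ = exp.AddStreamExport("direct.>", []*Account{imp}) // only IMP may import
}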
// Check if another account is authorized to import from us.
func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool {
// Find the subject in the exports list.
a.mu.RLock()
auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim)
a.mu.RUnlock()
return auth
}
func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool {
if a.exports.streams == nil || !IsValidSubject(subject) {
return false
}
return a.checkStreamExportApproved(account, subject, imClaim)
}
func (a *Account) checkAuth(ea *exportAuth, account *Account, imClaim *jwt.Import) bool {
// if ea is nil or ea.approved is nil, that denotes a public export
if ea == nil || (ea.approved == nil && !ea.tokenReq) {
return true
}
// Check if token required
if ea.tokenReq {
return a.checkActivation(account, imClaim, true)
}
// If we have a matching account we are authorized
_, ok := ea.approved[account.Name]
return ok
}
func (a *Account) checkStreamExportApproved(account *Account, subject string, imClaim *jwt.Import) bool {
// Check direct match of subject first
ea, ok := a.exports.streams[subject]
if ok {
if ea == nil {
return true
}
return a.checkAuth(&ea.exportAuth, account, imClaim)
}
// ok if we are here we did not match directly so we need to test each one.
// The import subject arg has to take precedence, meaning the export
// has to be a true subset of the import claim. We already checked for
// exact matches above.
tokens := strings.Split(subject, tsep)
for subj, ea := range a.exports.streams {
if isSubsetMatch(tokens, subj) {
if ea == nil {
return true
}
return a.checkAuth(&ea.exportAuth, account, imClaim)
}
}
return false
}
func (a *Account) checkServiceExportApproved(account *Account, subject string, imClaim *jwt.Import) bool {
// Check direct match of subject first
se, ok := a.exports.services[subject]
if ok {
		// if se is nil or se.approved is nil, that denotes a public export
if se == nil || (se.approved == nil && !se.tokenReq) {
return true
}
// Check if token required
if se.tokenReq {
return a.checkActivation(account, imClaim, true)
}
// If we have a matching account we are authorized
_, ok := se.approved[account.Name]
return ok
}
// ok if we are here we did not match directly so we need to test each one.
// The import subject arg has to take precedence, meaning the export
// has to be a true subset of the import claim. We already checked for
// exact matches above.
tokens := strings.Split(subject, tsep)
for subj, se := range a.exports.services {
if isSubsetMatch(tokens, subj) {
if se == nil || (se.approved == nil && !se.tokenReq) {
return true
}
// Check if token required
if se.tokenReq {
return a.checkActivation(account, imClaim, true)
}
_, ok := se.approved[account.Name]
return ok
}
}
return false
}
// Helper function to get a serviceExport.
// Lock should be held on entry.
func (a *Account) getServiceExport(subj string) *serviceExport {
se, ok := a.exports.services[subj]
	// The export may be represented by a wildcard, so look that up.
if !ok {
se = a.getWildcardServiceExport(subj)
}
return se
}
// This helper is used when trying to match a serviceExport record that is
// represented by a wildcard.
// Lock should be held on entry.
func (a *Account) getWildcardServiceExport(from string) *serviceExport {
tokens := strings.Split(from, tsep)
for subj, se := range a.exports.services {
if isSubsetMatch(tokens, subj) {
return se
}
}
return nil
}
// Will fetch the activation token for an import.
func fetchActivation(url string) string {
// FIXME(dlc) - Make configurable.
c := &http.Client{Timeout: 2 * time.Second}
resp, err := c.Get(url)
if err != nil || resp == nil {
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return ""
}
return string(body)
}
// These are import stream specific versions for when an activation expires.
func (a *Account) streamActivationExpired(exportAcc *Account, subject string) {
a.mu.RLock()
if a.expired || a.imports.streams == nil {
a.mu.RUnlock()
return
}
var si *streamImport
for _, si = range a.imports.streams {
if si.acc == exportAcc && si.from == subject {
break
}
}
if si == nil || si.invalid {
a.mu.RUnlock()
return
}
a.mu.RUnlock()
if si.acc.checkActivation(a, si.claim, false) {
// The token has been updated most likely and we are good to go.
return
}
a.mu.Lock()
si.invalid = true
clients := make([]*client, 0, len(a.clients))
for c := range a.clients {
clients = append(clients, c)
}
awcsti := map[string]struct{}{a.Name: {}}
a.mu.Unlock()
for _, c := range clients {
c.processSubsOnConfigReload(awcsti)
}
}
// These are import service specific versions for when an activation expires.
func (a *Account) serviceActivationExpired(subject string) {
a.mu.RLock()
if a.expired || a.imports.services == nil {
a.mu.RUnlock()
return
}
si := a.imports.services[subject]
if si == nil || si.invalid {
a.mu.RUnlock()
return
}
a.mu.RUnlock()
if si.acc.checkActivation(a, si.claim, false) {
// The token has been updated most likely and we are good to go.
return
}
a.mu.Lock()
si.invalid = true
a.mu.Unlock()
}
// Fires for expired activation tokens. We could track this with timers etc.
// Instead we just re-analyze where we are and if we need to act.
func (a *Account) activationExpired(exportAcc *Account, subject string, kind jwt.ExportType) {
switch kind {
case jwt.Stream:
a.streamActivationExpired(exportAcc, subject)
case jwt.Service:
a.serviceActivationExpired(subject)
}
}
func isRevoked(revocations map[string]int64, subject string, issuedAt int64) bool {
if revocations == nil {
return false
}
if t, ok := revocations[subject]; !ok || t < issuedAt {
return false
}
return true
}
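// exampleRevocationCheck is an illustrative sketch, not part of the server
// logic, of the semantics above: a revocation only applies to JWTs issued
// at or before the revocation timestamp. The nkey is hypothetical.
func exampleRevocationCheck() {
	revs := map[string]int64{"UUSERKEY": 1000}
	fmt.Println(isRevoked(revs, "UUSERKEY", 900))  // true: issued before the revocation
	fmt.Println(isRevoked(revs, "UUSERKEY", 1001)) // false: reissued after the revocation
}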
// checkActivation will check the activation token for validity.
func (a *Account) checkActivation(importAcc *Account, claim *jwt.Import, expTimer bool) bool {
if claim == nil || claim.Token == "" {
return false
}
// Create a quick clone so we can inline Token JWT.
clone := *claim
// We grab the token from a URL by hand here since we need expiration etc.
if url, err := url.Parse(clone.Token); err == nil && url.Scheme != "" {
clone.Token = fetchActivation(url.String())
}
vr := jwt.CreateValidationResults()
clone.Validate(importAcc.Name, vr)
if vr.IsBlocking(true) {
return false
}
act, err := jwt.DecodeActivationClaims(clone.Token)
if err != nil {
return false
}
if !a.isIssuerClaimTrusted(act) {
return false
}
vr = jwt.CreateValidationResults()
act.Validate(vr)
if vr.IsBlocking(true) {
return false
}
if act.Expires != 0 {
tn := time.Now().Unix()
if act.Expires <= tn {
return false
}
if expTimer {
expiresAt := time.Duration(act.Expires - tn)
time.AfterFunc(expiresAt*time.Second, func() {
importAcc.activationExpired(a, string(act.ImportSubject), claim.Type)
})
}
}
// Check for token revocation..
return !isRevoked(a.actsRevoked, act.Subject, act.IssuedAt)
}
// Returns true if the activation claim is trusted. That is the issuer matches
// the account or is an entry in the signing keys.
func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool {
// if no issuer account, issuer is the account
if claims.IssuerAccount == "" {
return true
}
// If the IssuerAccount is not us, then this is considered an error.
if a.Name != claims.IssuerAccount {
if a.srv != nil {
a.srv.Errorf("Invalid issuer account %q in activation claim (subject: %q - type: %q) for account %q",
claims.IssuerAccount, claims.Activation.ImportSubject, claims.Activation.ImportType, a.Name)
}
return false
}
return a.hasIssuerNoLock(claims.Issuer)
}
// Returns true if `a` and `b` stream imports are the same. Note that the
// check is done with the account's name, not the pointer. This is used
// during config reload where we are comparing current and new config
// in which pointers are different.
// No lock is acquired in this function, so it is assumed that the
// import maps are not changed while this executes.
func (a *Account) checkStreamImportsEqual(b *Account) bool {
if len(a.imports.streams) != len(b.imports.streams) {
return false
}
// Load the b imports into a map index by what we are looking for.
bm := make(map[string]*streamImport, len(b.imports.streams))
for _, bim := range b.imports.streams {
bm[bim.acc.Name+bim.from+bim.to] = bim
}
for _, aim := range a.imports.streams {
if _, ok := bm[aim.acc.Name+aim.from+aim.to]; !ok {
return false
}
}
return true
}
func (a *Account) checkStreamExportsEqual(b *Account) bool {
if len(a.exports.streams) != len(b.exports.streams) {
return false
}
for subj, aea := range a.exports.streams {
bea, ok := b.exports.streams[subj]
if !ok {
return false
}
if !reflect.DeepEqual(aea, bea) {
return false
}
}
return true
}
func (a *Account) checkServiceExportsEqual(b *Account) bool {
if len(a.exports.services) != len(b.exports.services) {
return false
}
for subj, aea := range a.exports.services {
bea, ok := b.exports.services[subj]
if !ok {
return false
}
if !reflect.DeepEqual(aea, bea) {
return false
}
}
return true
}
// Check if another account is authorized to route requests to this service.
func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool {
a.mu.RLock()
authorized := a.checkServiceImportAuthorizedNoLock(account, subject, imClaim)
a.mu.RUnlock()
return authorized
}
// Check if another account is authorized to route requests to this service.
func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool {
// Find the subject in the services list.
if a.exports.services == nil {
return false
}
return a.checkServiceExportApproved(account, subject, imClaim)
}
// IsExpired returns expiration status.
func (a *Account) IsExpired() bool {
a.mu.RLock()
exp := a.expired
a.mu.RUnlock()
return exp
}
// Called when an account has expired.
func (a *Account) expiredTimeout() {
// Mark expired first.
a.mu.Lock()
a.expired = true
a.mu.Unlock()
// Collect the clients and expire them.
cs := make([]*client, 0, len(a.clients))
a.mu.RLock()
for c := range a.clients {
cs = append(cs, c)
}
a.mu.RUnlock()
for _, c := range cs {
c.accountAuthExpired()
}
}
// Sets the expiration timer for an account JWT that has it set.
func (a *Account) setExpirationTimer(d time.Duration) {
a.etmr = time.AfterFunc(d, a.expiredTimeout)
}
// Lock should be held
func (a *Account) clearExpirationTimer() bool {
if a.etmr == nil {
return true
}
stopped := a.etmr.Stop()
a.etmr = nil
return stopped
}
// checkUserRevoked will check if a user has been revoked.
func (a *Account) checkUserRevoked(nkey string, issuedAt int64) bool {
a.mu.RLock()
defer a.mu.RUnlock()
return isRevoked(a.usersRevoked, nkey, issuedAt)
}
// Check expiration and set the proper state as needed.
func (a *Account) checkExpiration(claims *jwt.ClaimsData) {
a.mu.Lock()
defer a.mu.Unlock()
a.clearExpirationTimer()
if claims.Expires == 0 {
a.expired = false
return
}
tn := time.Now().Unix()
if claims.Expires <= tn {
a.expired = true
return
}
expiresAt := time.Duration(claims.Expires - tn)
a.setExpirationTimer(expiresAt * time.Second)
a.expired = false
}
// hasIssuer returns true if the issuer matches the account
// issuer or it is a signing key for the account.
func (a *Account) hasIssuer(issuer string) bool {
a.mu.RLock()
hi := a.hasIssuerNoLock(issuer)
a.mu.RUnlock()
return hi
}
// hasIssuerNoLock is the unlocked version of hasIssuer
func (a *Account) hasIssuerNoLock(issuer string) bool {
// same issuer -- keep this for safety on the calling code
if a.Name == issuer {
return true
}
for i := 0; i < len(a.signingKeys); i++ {
if a.signingKeys[i] == issuer {
return true
}
}
return false
}
// Returns the loop detection subject used for leafnodes
func (a *Account) getLDSubject() string {
a.mu.RLock()
lds := a.lds
a.mu.RUnlock()
return lds
}
// Placeholder for signaling token auth required.
var tokenAuthReq = []*Account{}
func authAccounts(tokenReq bool) []*Account {
if tokenReq {
return tokenAuthReq
}
return nil
}
// SetAccountResolver will assign the account resolver.
func (s *Server) SetAccountResolver(ar AccountResolver) {
s.mu.Lock()
s.accResolver = ar
s.mu.Unlock()
}
// AccountResolver returns the registered account resolver.
func (s *Server) AccountResolver() AccountResolver {
s.mu.Lock()
ar := s.accResolver
s.mu.Unlock()
return ar
}
// isClaimAccount returns if this account is backed by a JWT claim.
// Lock should be held.
func (a *Account) isClaimAccount() bool {
return a.claimJWT != ""
}
// UpdateAccountClaims will update an existing account with new claims.
// This will replace any exports or imports previously defined.
// Lock MUST NOT be held upon entry.
func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) {
s.updateAccountClaimsWithRefresh(a, ac, true)
}
// updateAccountClaimsWithRefresh will update an existing account with new claims.
// If refreshImportingAccounts is true it will also update incomplete dependent accounts
// This will replace any exports or imports previously defined.
// Lock MUST NOT be held upon entry.
func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaims, refreshImportingAccounts bool) {
if a == nil {
return
}
s.Debugf("Updating account claims: %s", a.Name)
a.checkExpiration(ac.Claims())
a.mu.Lock()
// Clone to update, only select certain fields.
old := &Account{Name: a.Name, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys}
// Reset exports and imports here.
// Exports is creating a whole new map.
a.exports = exportMap{}
// Imports are checked unlocked in processInbound, so we can't change out the struct here. Need to process inline.
if a.imports.streams != nil {
old.imports.streams = a.imports.streams
a.imports.streams = nil
}
if a.imports.services != nil {
old.imports.services = make(map[string]*serviceImport, len(a.imports.services))
}
for k, v := range a.imports.services {
old.imports.services[k] = v
delete(a.imports.services, k)
}
// Reset any notion of export revocations.
a.actsRevoked = nil
// update account signing keys
a.signingKeys = nil
signersChanged := false
if len(ac.SigningKeys) > 0 {
		// ensure we copy the new keys and sort them
a.signingKeys = append(a.signingKeys, ac.SigningKeys...)
sort.Strings(a.signingKeys)
}
if len(a.signingKeys) != len(old.signingKeys) {
signersChanged = true
} else {
for i := 0; i < len(old.signingKeys); i++ {
if a.signingKeys[i] != old.signingKeys[i] {
signersChanged = true
break
}
}
}
a.mu.Unlock()
gatherClients := func() []*client {
a.mu.RLock()
clients := make([]*client, 0, len(a.clients))
for c := range a.clients {
clients = append(clients, c)
}
a.mu.RUnlock()
return clients
}
jsEnabled := s.JetStreamEnabled()
if jsEnabled && a == s.SystemAccount() {
for _, export := range allJsExports {
s.Debugf("Adding jetstream service export %q for %s", export, a.Name)
if err := a.AddServiceExport(export, nil); err != nil {
s.Errorf("Error setting up jetstream service exports: %v", err)
}
}
}
for _, e := range ac.Exports {
switch e.Type {
case jwt.Stream:
s.Debugf("Adding stream export %q for %s", e.Subject, a.Name)
if err := a.AddStreamExport(string(e.Subject), authAccounts(e.TokenReq)); err != nil {
s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error())
}
case jwt.Service:
s.Debugf("Adding service export %q for %s", e.Subject, a.Name)
rt := Singleton
switch e.ResponseType {
case jwt.ResponseTypeStream:
rt = Streamed
case jwt.ResponseTypeChunked:
rt = Chunked
}
if err := a.AddServiceExportWithResponse(string(e.Subject), rt, authAccounts(e.TokenReq)); err != nil {
s.Debugf("Error adding service export to account [%s]: %v", a.Name, err)
}
if e.Latency != nil {
if err := a.TrackServiceExportWithSampling(string(e.Subject), string(e.Latency.Results), int(e.Latency.Sampling)); err != nil {
hdrNote := ""
if e.Latency.Sampling == jwt.Headers {
hdrNote = " (using headers)"
}
s.Debugf("Error adding latency tracking%s for service export to account [%s]: %v", hdrNote, a.Name, err)
}
}
}
// We will track these at the account level. Should not have any collisions.
if e.Revocations != nil {
a.mu.Lock()
if a.actsRevoked == nil {
a.actsRevoked = make(map[string]int64)
}
for k, t := range e.Revocations {
a.actsRevoked[k] = t
}
a.mu.Unlock()
}
}
var incompleteImports []*jwt.Import
for _, i := range ac.Imports {
// check tmpAccounts with priority
var acc *Account
var err error
if v, ok := s.tmpAccounts.Load(i.Account); ok {
acc = v.(*Account)
} else {
acc, err = s.lookupAccount(i.Account)
}
if acc == nil || err != nil {
s.Errorf("Can't locate account [%s] for import of [%v] %s (err=%v)", i.Account, i.Subject, i.Type, err)
incompleteImports = append(incompleteImports, i)
continue
}
switch i.Type {
case jwt.Stream:
s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To)
if err := a.AddStreamImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil {
s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error())
incompleteImports = append(incompleteImports, i)
}
case jwt.Service:
// FIXME(dlc) - need to add in respThresh here eventually.
s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To)
if err := a.AddServiceImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil {
s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error())
incompleteImports = append(incompleteImports, i)
}
}
}
// Now let's apply any needed changes from import/export changes.
if !a.checkStreamImportsEqual(old) {
awcsti := map[string]struct{}{a.Name: {}}
for _, c := range gatherClients() {
c.processSubsOnConfigReload(awcsti)
}
}
// Now check if stream exports have changed.
if !a.checkStreamExportsEqual(old) || signersChanged {
clients := map[*client]struct{}{}
// We need to check all accounts that have an import claim from this account.
awcsti := map[string]struct{}{}
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Move to the next if this account is actually account "a".
if acc.Name == a.Name {
return true
}
// TODO: checkStreamImportAuthorized() stack should not be trying
// to lock "acc". If we find that to be needed, we will need to
// rework this to ensure we don't lock acc.
acc.mu.Lock()
for _, im := range acc.imports.streams {
if im != nil && im.acc.Name == a.Name {
// Check for if we are still authorized for an import.
im.invalid = !a.checkStreamImportAuthorized(acc, im.from, im.claim)
awcsti[acc.Name] = struct{}{}
for c := range acc.clients {
clients[c] = struct{}{}
}
}
}
acc.mu.Unlock()
return true
})
// Now walk clients.
for c := range clients {
c.processSubsOnConfigReload(awcsti)
}
}
// Now check if service exports have changed.
if !a.checkServiceExportsEqual(old) || signersChanged {
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Move to the next if this account is actually account "a".
if acc.Name == a.Name {
return true
}
// TODO: checkServiceImportAuthorized() stack should not be trying
// to lock "acc". If we find that to be needed, we will need to
// rework this to ensure we don't lock acc.
acc.mu.Lock()
for _, si := range acc.imports.services {
if si != nil && si.acc.Name == a.Name {
// Check for if we are still authorized for an import.
si.invalid = !a.checkServiceImportAuthorized(acc, si.to, si.claim)
if si.latency != nil && !si.response {
// Make sure we should still be tracking latency.
if se := a.getServiceExport(si.to); se != nil {
si.latency = se.latency
}
}
}
}
acc.mu.Unlock()
return true
})
}
// Now make sure we shutdown the old service import subscriptions.
var sids [][]byte
a.mu.RLock()
c := a.ic
for _, si := range old.imports.services {
if c != nil && si.sid != nil {
sids = append(sids, si.sid)
}
}
a.mu.RUnlock()
for _, sid := range sids {
c.processUnsub(sid)
}
// Now do limits if they are present.
a.mu.Lock()
a.msubs = int32(ac.Limits.Subs)
a.mpay = int32(ac.Limits.Payload)
a.mconns = int32(ac.Limits.Conn)
a.mleafs = int32(ac.Limits.LeafNodeConn)
// Check for any revocations
if len(ac.Revocations) > 0 {
// We will always replace whatever we had with most current, so no
// need to look at what we have.
a.usersRevoked = make(map[string]int64, len(ac.Revocations))
for pk, t := range ac.Revocations {
a.usersRevoked[pk] = t
}
} else {
a.usersRevoked = nil
}
a.defaultPerms = buildPermissionsFromJwt(&ac.DefaultPermissions)
a.incomplete = len(incompleteImports) != 0
for _, i := range incompleteImports {
s.incompleteAccExporterMap.Store(i.Account, struct{}{})
}
if a.srv == nil {
a.srv = s
}
if jsEnabled {
if ac.Limits.JetStreamLimits.DiskStorage != 0 || ac.Limits.JetStreamLimits.MemoryStorage != 0 {
// JetStreamAccountLimits and jwt.JetStreamLimits use same value for unlimited
a.jsLimits = &JetStreamAccountLimits{
MaxMemory: ac.Limits.JetStreamLimits.MemoryStorage,
MaxStore: ac.Limits.JetStreamLimits.DiskStorage,
MaxStreams: int(ac.Limits.JetStreamLimits.Streams),
MaxConsumers: int(ac.Limits.JetStreamLimits.Consumer),
}
} else if a.jsLimits != nil {
// covers failed update followed by disable
a.jsLimits = nil
}
}
a.updated = time.Now()
a.mu.Unlock()
clients := gatherClients()
// Sort if we are over the limit.
if a.MaxTotalConnectionsReached() {
sort.Slice(clients, func(i, j int) bool {
return clients[i].start.After(clients[j].start)
})
}
if jsEnabled {
if err := s.configJetStream(a); err != nil {
s.Errorf("Error configuring jetstream for account [%s]: %v", a.Name, err.Error())
a.mu.Lock()
// Absent reload of js server cfg, this is going to be broken until js is disabled
a.incomplete = true
a.mu.Unlock()
}
}
for i, c := range clients {
a.mu.RLock()
exceeded := a.mconns != jwt.NoLimit && i >= int(a.mconns)
a.mu.RUnlock()
if exceeded {
c.maxAccountConnExceeded()
continue
}
c.mu.Lock()
c.applyAccountLimits()
theJWT := c.opts.JWT
c.mu.Unlock()
// Check for being revoked here. We use ac one to avoid the account lock.
if ac.Revocations != nil && theJWT != "" {
if juc, err := jwt.DecodeUserClaims(theJWT); err != nil {
c.Debugf("User JWT not valid: %v", err)
c.authViolation()
continue
} else if ok := ac.IsClaimRevoked(juc); ok {
c.sendErrAndDebug("User Authentication Revoked")
c.closeConnection(Revocation)
continue
}
}
}
// Check if the signing keys changed, might have to evict
if signersChanged {
for _, c := range clients {
c.mu.Lock()
sk := c.user.SigningKey
c.mu.Unlock()
if sk != "" && !a.hasIssuer(sk) {
c.closeConnection(AuthenticationViolation)
}
}
}
if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok && refreshImportingAccounts {
s.incompleteAccExporterMap.Delete(old.Name)
s.accounts.Range(func(key, value interface{}) bool {
acc := value.(*Account)
acc.mu.RLock()
incomplete := acc.incomplete
name := acc.Name
// Must use jwt in account or risk failing on fetch
// This jwt may not be the same that caused exportingAcc to be in incompleteAccExporterMap
claimJWT := acc.claimJWT
acc.mu.RUnlock()
if incomplete && name != old.Name {
if accClaims, _, err := s.verifyAccountClaims(claimJWT); err == nil {
// Since claimJWT has not changed, acc can become complete
				// but it won't alter incomplete for its dependent accounts.
s.updateAccountClaimsWithRefresh(acc, accClaims, false)
// old.Name was deleted before ranging over accounts
// If it exists again, UpdateAccountClaims set it for failed imports of acc.
// So there was one import of acc that imported this account and failed again.
// Since this account just got updated, the import itself may be in error. So trace that.
if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok {
s.incompleteAccExporterMap.Delete(old.Name)
s.Errorf("Account %s has issues importing account %s", name, old.Name)
}
}
}
return true
})
}
}
// Helper to build an internal account structure from a jwt.AccountClaims.
// Lock MUST NOT be held upon entry.
func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account {
acc := NewAccount(ac.Subject)
acc.Issuer = ac.Issuer
// Set this here since we are placing in s.tmpAccounts below and may be
	// referenced by a route RS+, etc.
s.setAccountSublist(acc)
// We don't want to register an account that is in the process of
// being built, however, to solve circular import dependencies, we
// need to store it here.
s.tmpAccounts.Store(ac.Subject, acc)
s.UpdateAccountClaims(acc, ac)
return acc
}
// Helper to build Permissions from jwt.Permissions
// or return nil if none were specified
func buildPermissionsFromJwt(uc *jwt.Permissions) *Permissions {
if uc == nil {
return nil
}
var p *Permissions
if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 {
if p == nil {
p = &Permissions{}
}
p.Publish = &SubjectPermission{}
p.Publish.Allow = uc.Pub.Allow
p.Publish.Deny = uc.Pub.Deny
}
if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 {
if p == nil {
p = &Permissions{}
}
p.Subscribe = &SubjectPermission{}
p.Subscribe.Allow = uc.Sub.Allow
p.Subscribe.Deny = uc.Sub.Deny
}
if uc.Resp != nil {
if p == nil {
p = &Permissions{}
}
p.Response = &ResponsePermission{
MaxMsgs: uc.Resp.MaxMsgs,
Expires: uc.Resp.Expires,
}
validateResponsePermissions(p)
}
return p
}
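// examplePermissionsFromJwt is an illustrative sketch, not part of the
// server logic, of the mapping above: only the permission groups present
// in the claim are materialized on the resulting Permissions value. The
// subjects are hypothetical.
func examplePermissionsFromJwt() {
	uc := &jwt.Permissions{
		Pub: jwt.Permission{Allow: jwt.StringList{"orders.>"}},
		Sub: jwt.Permission{Deny: jwt.StringList{"admin.>"}},
	}
	p := buildPermissionsFromJwt(uc)
	fmt.Printf("pub allow: %v, sub deny: %v\n", p.Publish.Allow, p.Subscribe.Deny)
}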
// Helper to build internal NKeyUser.
func buildInternalNkeyUser(uc *jwt.UserClaims, acts map[string]struct{}, acc *Account) *NkeyUser {
nu := &NkeyUser{Nkey: uc.Subject, Account: acc, AllowedConnectionTypes: acts}
if uc.IssuerAccount != "" {
nu.SigningKey = uc.Issuer
}
// Now check for permissions.
var p = buildPermissionsFromJwt(&uc.Permissions)
if p == nil && acc.defaultPerms != nil {
p = acc.defaultPerms.clone()
}
nu.Permissions = p
return nu
}
const fetchTimeout = 2 * time.Second
func fetchAccount(res AccountResolver, name string) (string, error) {
if !nkeys.IsValidPublicAccountKey(name) {
return "", fmt.Errorf("will only fetch valid account keys")
}
return res.Fetch(name)
}
// AccountResolver interface. This is to fetch Account JWTs by public nkeys
type AccountResolver interface {
Fetch(name string) (string, error)
Store(name, jwt string) error
IsReadOnly() bool
Start(server *Server) error
IsTrackingUpdate() bool
Reload() error
Close()
}
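// exampleResolverWiring is an illustrative sketch, not part of the server
// logic: any AccountResolver implementation can be registered with the
// server, and fetchAccount guards Fetch with an nkey shape check. The
// server value is assumed to exist.
func exampleResolverWiring(s *Server) {
	var res MemAccResolver
	s.SetAccountResolver(&res)
	// Anything that is not a valid public account nkey is rejected before
	// Fetch is ever called.
	if _, err := fetchAccount(s.AccountResolver(), "not-a-key"); err != nil {
		fmt.Println(err)
	}
}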
// Default implementations of IsReadOnly/Start etc. so they only need to be written when the behavior changes.
type resolverDefaultsOpsImpl struct{}
func (*resolverDefaultsOpsImpl) IsReadOnly() bool {
return true
}
func (*resolverDefaultsOpsImpl) IsTrackingUpdate() bool {
return false
}
func (*resolverDefaultsOpsImpl) Start(*Server) error {
return nil
}
func (*resolverDefaultsOpsImpl) Reload() error {
return nil
}
func (*resolverDefaultsOpsImpl) Close() {
}
func (*resolverDefaultsOpsImpl) Store(_, _ string) error {
return fmt.Errorf("Store operation not supported for URL Resolver")
}
// MemAccResolver is a memory only resolver.
// Mostly for testing.
type MemAccResolver struct {
sm sync.Map
resolverDefaultsOpsImpl
}
// Fetch will fetch the account jwt claims from the internal sync.Map.
func (m *MemAccResolver) Fetch(name string) (string, error) {
if j, ok := m.sm.Load(name); ok {
return j.(string), nil
}
return _EMPTY_, ErrMissingAccount
}
// Store will store the account jwt claims in the internal sync.Map.
func (m *MemAccResolver) Store(name, jwt string) error {
m.sm.Store(name, jwt)
return nil
}
func (m *MemAccResolver) IsReadOnly() bool {
return false
}
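// exampleMemResolver is an illustrative sketch, not part of the server
// logic: store and fetch an account JWT in the in-memory resolver. The key
// and JWT strings are hypothetical placeholders.
func exampleMemResolver() {
	var res MemAccResolver
	_ = res.Store("ACCOUNTPUBLICKEY", "<account jwt>")
	if theJWT, err := res.Fetch("ACCOUNTPUBLICKEY"); err == nil {
		fmt.Println(theJWT)
	}
}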
// URLAccResolver implements an http fetcher.
type URLAccResolver struct {
url string
c *http.Client
resolverDefaultsOpsImpl
}
// NewURLAccResolver returns a new resolver for the given base URL.
func NewURLAccResolver(url string) (*URLAccResolver, error) {
if !strings.HasSuffix(url, "/") {
url += "/"
}
// FIXME(dlc) - Make timeout and others configurable.
// We create our own transport to amortize TLS.
tr := &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
}
ur := &URLAccResolver{
url: url,
c: &http.Client{Timeout: fetchTimeout, Transport: tr},
}
return ur, nil
}
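// exampleURLResolver is an illustrative sketch, not part of the server
// logic: resolve an account JWT over HTTP. The base URL is a hypothetical
// placeholder; Fetch simply appends the account public key to it.
func exampleURLResolver() {
	res, err := NewURLAccResolver("https://jwt.example.com/accounts/")
	if err != nil {
		return
	}
	theJWT, err := res.Fetch("ACCOUNTPUBLICKEY")
	fmt.Println(theJWT, err)
}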
// Fetch will fetch the account jwt claims from the base url, appending the
// account name onto the end.
func (ur *URLAccResolver) Fetch(name string) (string, error) {
url := ur.url + name
resp, err := ur.c.Get(url)
if err != nil {
return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, err)
} else if resp == nil {
return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", url)
} else if resp.StatusCode != http.StatusOK {
return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, resp.Status)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return _EMPTY_, err
}
return string(body), nil
}
// DirAccResolver is a resolver that uses NATS for synchronization and a backing directory for storage.
type DirAccResolver struct {
*DirJWTStore
*Server
syncInterval time.Duration
}
func (dr *DirAccResolver) IsTrackingUpdate() bool {
return true
}
func (dr *DirAccResolver) Reload() error {
return dr.DirJWTStore.Reload()
}
func respondToUpdate(s *Server, respSubj string, acc string, message string, err error) {
if err == nil {
if acc == "" {
s.Debugf("%s", message)
} else {
s.Debugf("%s - %s", message, acc)
}
} else {
if acc == "" {
s.Errorf("%s - %s", message, err)
} else {
s.Errorf("%s - %s - %s", message, acc, err)
}
}
if respSubj == "" {
return
}
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
m := map[string]interface{}{}
if acc != "" {
m["account"] = acc
}
if err == nil {
m["code"] = http.StatusOK
m["message"] = message
response["data"] = m
} else {
m["code"] = http.StatusInternalServerError
m["description"] = fmt.Sprintf("%s - %v", message, err)
response["error"] = m
}
s.sendInternalMsgLocked(respSubj, _EMPTY_, server, response)
}
func handleListRequest(store *DirJWTStore, s *Server, reply string) {
if reply == "" {
return
}
accIds := make([]string, 0, 1024)
if err := store.PackWalk(1, func(partialPackMsg string) {
if tk := strings.Split(partialPackMsg, "|"); len(tk) == 2 {
accIds = append(accIds, tk[0])
}
}); err != nil {
// let them timeout
s.Errorf("list request error: %v", err)
} else {
s.Debugf("list request responded with %d account ids", len(accIds))
server := &ServerInfo{}
response := map[string]interface{}{"server": server, "data": accIds}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
}
}
func handleDeleteRequest(store *DirJWTStore, s *Server, msg []byte, reply string) {
var accIds []interface{}
var subj, sysAccName string
if sysAcc := s.SystemAccount(); sysAcc != nil {
sysAccName = sysAcc.GetName()
}
// TODO Can allow keys (issuer) to delete accounts they issued and operator key to delete all accounts.
// For now only operator is allowed to delete
gk, err := jwt.DecodeGeneric(string(msg))
if err == nil {
subj = gk.Subject
if store.deleteType == NoDelete {
err = fmt.Errorf("delete must be enabled in server config")
} else if subj != gk.Issuer {
err = fmt.Errorf("not self signed")
} else if !s.isTrustedIssuer(gk.Issuer) {
err = fmt.Errorf("not trusted")
} else if store.operator != gk.Issuer {
err = fmt.Errorf("needs to be the operator operator")
} else if list, ok := gk.Data["accounts"]; !ok {
err = fmt.Errorf("malformed request")
} else if accIds, ok = list.([]interface{}); !ok {
err = fmt.Errorf("malformed request")
} else {
for _, entry := range accIds {
if acc, ok := entry.(string); !ok ||
acc == "" || !nkeys.IsValidPublicAccountKey(acc) {
err = fmt.Errorf("malformed request")
break
} else if acc == sysAccName {
err = fmt.Errorf("not allowed to delete system account")
break
}
}
}
}
if err != nil {
respondToUpdate(s, reply, "", fmt.Sprintf("delete accounts request by %s failed", subj), err)
return
}
errs := []string{}
passCnt := 0
for _, acc := range accIds {
if err := store.delete(acc.(string)); err != nil {
errs = append(errs, err.Error())
} else {
passCnt++
}
}
if len(errs) == 0 {
respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts", passCnt), nil)
} else {
respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts, failed for %d", passCnt, len(errs)),
			errors.New(strings.Join(errs, "\n")))
}
}
func getOperator(s *Server) (string, error) {
var op string
if opts := s.getOpts(); opts != nil && len(opts.TrustedOperators) > 0 {
op = opts.TrustedOperators[0].Subject
}
if op == "" {
return "", fmt.Errorf("no operator found")
}
return op, nil
}
func (dr *DirAccResolver) Start(s *Server) error {
op, err := getOperator(s)
if err != nil {
return err
}
dr.Lock()
defer dr.Unlock()
dr.Server = s
dr.operator = op
	dr.DirJWTStore.changed = func(pubKey string) {
		v, ok := s.accounts.Load(pubKey)
		if !ok {
			return
		}
		theJWT, err := dr.LoadAcc(pubKey)
		if err != nil {
			s.Errorf("update got error on load: %v", err)
			return
		}
		if err := s.updateAccountWithClaimJWT(v.(*Account), theJWT); err != nil {
			s.Errorf("update resulted in error %v", err)
		}
	}
packRespIb := s.newRespInbox()
for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
// subscribe to account jwt update requests
if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) {
pubKey := ""
tk := strings.Split(subj, tsep)
if len(tk) == accUpdateTokensNew {
pubKey = tk[accReqAccIndex]
} else if len(tk) == accUpdateTokensOld {
pubKey = tk[accUpdateAccIdxOld]
} else {
s.Debugf("jwt update skipped due to bad subject %q", subj)
return
}
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if claim.Subject != pubKey {
err := errors.New("subject does not match jwt content")
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if err := dr.save(pubKey, string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else {
respondToUpdate(s, resp, pubKey, "jwt updated", nil)
}
}); err != nil {
return fmt.Errorf("error setting up update handling: %v", err)
}
}
if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) {
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err)
} else if err := dr.save(claim.Subject, string(msg)); err != nil {
respondToUpdate(s, resp, claim.Subject, "jwt update resulted in error", err)
} else {
respondToUpdate(s, resp, claim.Subject, "jwt updated", nil)
}
}); err != nil {
return fmt.Errorf("error setting up update handling: %v", err)
}
// respond to lookups with our version
if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, subj, reply string, msg []byte) {
if reply == "" {
return
}
tk := strings.Split(subj, tsep)
if len(tk) != accLookupReqTokens {
return
}
if theJWT, err := dr.DirJWTStore.LoadAcc(tk[accReqAccIndex]); err != nil {
s.Errorf("Merging resulted in error: %v", err)
} else {
s.sendInternalMsgLocked(reply, "", nil, []byte(theJWT))
}
}); err != nil {
return fmt.Errorf("error setting up lookup request handling: %v", err)
}
// respond to pack requests with one or more pack messages
// an empty message signifies the end of the response
if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _, reply string, theirHash []byte) {
if reply == "" {
return
}
ourHash := dr.DirJWTStore.Hash()
if bytes.Equal(theirHash, ourHash[:]) {
s.sendInternalMsgLocked(reply, "", nil, []byte{})
s.Debugf("pack request matches hash %x", ourHash[:])
} else if err := dr.DirJWTStore.PackWalk(1, func(partialPackMsg string) {
s.sendInternalMsgLocked(reply, "", nil, []byte(partialPackMsg))
}); err != nil {
// let them timeout
s.Errorf("pack request error: %v", err)
} else {
s.Debugf("pack request hash %x - finished responding with hash %x", theirHash, ourHash)
s.sendInternalMsgLocked(reply, "", nil, []byte{})
}
}); err != nil {
return fmt.Errorf("error setting up pack request handling: %v", err)
}
// respond to list requests with one message containing all account ids
if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) {
handleListRequest(dr.DirJWTStore, s, reply)
}); err != nil {
return fmt.Errorf("error setting up list request handling: %v", err)
}
if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) {
handleDeleteRequest(dr.DirJWTStore, s, msg, reply)
}); err != nil {
return fmt.Errorf("error setting up delete request handling: %v", err)
}
// embed pack responses into store
if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _, _ string, msg []byte) {
hash := dr.DirJWTStore.Hash()
if len(msg) == 0 { // end of response stream
s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash())
return
} else if err := dr.DirJWTStore.Merge(string(msg)); err != nil {
s.Errorf("Merging resulted in error: %v", err)
} else {
s.Debugf("Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash())
}
}); err != nil {
return fmt.Errorf("error setting up pack response handling: %v", err)
}
// periodically send out pack message
quit := s.quitCh
s.startGoRoutine(func() {
defer s.grWG.Done()
ticker := time.NewTicker(dr.syncInterval)
for {
select {
case <-quit:
ticker.Stop()
return
case <-ticker.C:
}
ourHash := dr.DirJWTStore.Hash()
s.Debugf("Checking store state: %x", ourHash)
s.sendInternalMsgLocked(accPackReqSubj, packRespIb, nil, ourHash[:])
}
})
s.Noticef("Managing all jwt in exclusive directory %s", dr.directory)
return nil
}
func (dr *DirAccResolver) Fetch(name string) (string, error) {
if theJWT, err := dr.LoadAcc(name); theJWT != "" {
return theJWT, nil
} else {
dr.Lock()
srv := dr.Server
dr.Unlock()
if srv == nil {
return "", err
}
return srv.fetch(dr, name) // lookup from other server
}
}
func (dr *DirAccResolver) Store(name, jwt string) error {
return dr.saveIfNewer(name, jwt)
}
func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete bool) (*DirAccResolver, error) {
if limit == 0 {
limit = math.MaxInt64
}
if syncInterval <= 0 {
syncInterval = time.Minute
}
deleteType := NoDelete
if delete {
deleteType = RenameDeleted
}
store, err := NewExpiringDirJWTStore(path, false, true, deleteType, 0, limit, false, 0, nil)
if err != nil {
return nil, err
}
return &DirAccResolver{store, nil, syncInterval}, nil
}
// Caching resolver using nats for lookups and making use of a directory for storage
type CacheDirAccResolver struct {
DirAccResolver
ttl time.Duration
}
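// fetch asks other servers, via the system account, for the named account's
// JWT, stores the first reply that arrives before fetchTimeout and returns it.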
func (s *Server) fetch(res AccountResolver, name string) (string, error) {
if s == nil {
return "", ErrNoAccountResolver
}
respC := make(chan []byte, 1)
accountLookupRequest := fmt.Sprintf(accLookupReqSubj, name)
s.mu.Lock()
if s.sys == nil || s.sys.replies == nil {
s.mu.Unlock()
return "", fmt.Errorf("eventing shut down")
}
replySubj := s.newRespInbox()
replies := s.sys.replies
// Store our handler.
replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
clone := make([]byte, len(msg))
copy(clone, msg)
s.mu.Lock()
if _, ok := replies[replySubj]; ok {
select {
case respC <- clone: // only use first response and only if there is still interest
default:
}
}
s.mu.Unlock()
}
s.sendInternalMsg(accountLookupRequest, replySubj, nil, []byte{})
quit := s.quitCh
s.mu.Unlock()
var err error
var theJWT string
select {
case <-quit:
err = errors.New("fetching jwt failed due to shutdown")
case <-time.After(fetchTimeout):
err = errors.New("fetching jwt timed out")
case m := <-respC:
if err = res.Store(name, string(m)); err == nil {
theJWT = string(m)
}
}
s.mu.Lock()
delete(replies, replySubj)
s.mu.Unlock()
close(respC)
return theJWT, err
}
func NewCacheDirAccResolver(path string, limit int64, ttl time.Duration, _ ...dirJWTStoreOption) (*CacheDirAccResolver, error) {
if limit <= 0 {
limit = 1_000
}
store, err := NewExpiringDirJWTStore(path, false, true, HardDelete, 0, limit, true, ttl, nil)
if err != nil {
return nil, err
}
return &CacheDirAccResolver{DirAccResolver{store, nil, 0}, ttl}, nil
}
func (dr *CacheDirAccResolver) Start(s *Server) error {
op, err := getOperator(s)
if err != nil {
return err
}
dr.Lock()
defer dr.Unlock()
dr.Server = s
dr.operator = op
dr.DirJWTStore.changed = func(pubKey string) {
if v, ok := s.accounts.Load(pubKey); !ok {
} else if jwt, err := dr.LoadAcc(pubKey); err != nil {
s.Errorf("update got error on load: %v", err)
} else if err := s.updateAccountWithClaimJWT(v.(*Account), jwt); err != nil {
s.Errorf("update resulted in error %v", err)
}
}
for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
// subscribe to account jwt update requests
if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) {
pubKey := ""
tk := strings.Split(subj, tsep)
if len(tk) == accUpdateTokensNew {
pubKey = tk[accReqAccIndex]
} else if len(tk) == accUpdateTokensOld {
pubKey = tk[accUpdateAccIdxOld]
} else {
s.Debugf("jwt update cache skipped due to bad subject %q", subj)
return
}
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err)
} else if claim.Subject != pubKey {
err := errors.New("subject does not match jwt content")
respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err)
} else if _, ok := s.accounts.Load(pubKey); !ok {
respondToUpdate(s, resp, pubKey, "jwt update cache skipped", nil)
} else if err := dr.save(pubKey, string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err)
} else {
respondToUpdate(s, resp, pubKey, "jwt updated cache", nil)
}
}); err != nil {
return fmt.Errorf("error setting up update handling: %v", err)
}
}
if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) {
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, "n/a", "jwt update cache resulted in error", err)
} else if _, ok := s.accounts.Load(claim.Subject); !ok {
respondToUpdate(s, resp, claim.Subject, "jwt update cache skipped", nil)
} else if err := dr.save(claim.Subject, string(msg)); err != nil {
respondToUpdate(s, resp, claim.Subject, "jwt update cache resulted in error", err)
} else {
respondToUpdate(s, resp, claim.Subject, "jwt updated cache", nil)
}
}); err != nil {
return fmt.Errorf("error setting up update handling: %v", err)
}
// respond to list requests with one message containing all account ids
if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) {
handleListRequest(dr.DirJWTStore, s, reply)
}); err != nil {
return fmt.Errorf("error setting up list request handling: %v", err)
}
if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) {
handleDeleteRequest(dr.DirJWTStore, s, msg, reply)
}); err != nil {
return fmt.Errorf("error setting up list request handling: %v", err)
}
s.Noticef("Managing some jwt in exclusive directory %s", dr.directory)
return nil
}
func (dr *CacheDirAccResolver) Reload() error {
return dr.DirAccResolver.Reload()
}
// Transforms for arbitrarily mapping subjects from one to another for maps, tees and filters.
// These can also be used for proper mapping on wildcard exports/imports.
// These will be grouped and caching and locking are assumed to be in the upper layers.
type transform struct {
src, dest string
dtoks []string
stoks []string
dtpi []int8
}
// Helper to pull raw place holder index. Returns -1 if not a place holder.
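// For example, "$2" yields 2 while "foo" or a bare "$" yield -1.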
func placeHolderIndex(token string) int {
if len(token) > 1 && token[0] == '$' {
var tp int
if n, err := fmt.Sscanf(token, "$%d", &tp); err == nil && n == 1 {
return tp
}
}
return -1
}
// newTransform will create a new transform checking the src and dest subjects for accuracy.
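// For example, src "foo.*.*" with dest "bar.$2.$1" maps "foo.a.b" to "bar.b.a".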
func newTransform(src, dest string) (*transform, error) {
// Both entries need to be valid subjects.
sv, stokens, npwcs, hasFwc := subjectInfo(src)
dv, dtokens, dnpwcs, dHasFwc := subjectInfo(dest)
// Make sure both are valid, match fwc if present and there are no pwcs in the dest subject.
if !sv || !dv || dnpwcs > 0 || hasFwc != dHasFwc {
return nil, ErrBadSubject
}
var dtpi []int8
// If the src has partial wildcards then the dest needs to have the token place markers.
if npwcs > 0 || hasFwc {
// We need to count to make sure that the dest has token holders for the pwcs.
sti := make(map[int]int)
for i, token := range stokens {
if len(token) == 1 && token[0] == pwc {
sti[len(sti)+1] = i
}
}
nphs := 0
for _, token := range dtokens {
tp := placeHolderIndex(token)
if tp >= 0 {
if tp > npwcs {
return nil, ErrBadSubject
}
nphs++
// Now build up our runtime mapping from dest to source tokens.
dtpi = append(dtpi, int8(sti[tp]))
} else {
dtpi = append(dtpi, -1)
}
}
if nphs != npwcs {
return nil, ErrBadSubject
}
}
return &transform{src: src, dest: dest, dtoks: dtokens, stoks: stokens, dtpi: dtpi}, nil
}
// match will take a literal published subject that is associated with a client and will match and transform
// the subject if possible.
// TODO(dlc) - We could add in client here to allow for things like foo -> foo.$ACCOUNT
func (tr *transform) match(subject string) (string, error) {
// Tokenize the subject. This should always be a literal subject.
tsa := [32]string{}
tts := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tts = append(tts, subject[start:i])
start = i + 1
}
}
tts = append(tts, subject[start:])
if !isValidLiteralSubject(tts) {
return "", ErrBadSubject
}
if isSubsetMatch(tts, tr.src) {
return tr.transform(tts)
}
return "", ErrNoTransforms
}
// Do not need to match, just transform.
func (tr *transform) transformSubject(subject string) (string, error) {
// Tokenize the subject.
tsa := [32]string{}
tts := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tts = append(tts, subject[start:i])
start = i + 1
}
}
tts = append(tts, subject[start:])
return tr.transform(tts)
}
// Do a transform on the subject to the dest subject.
func (tr *transform) transform(tokens []string) (string, error) {
if len(tr.dtpi) == 0 {
return tr.dest, nil
}
var b strings.Builder
var token string
// We need to walk destination tokens and create the mapped subject pulling tokens from src.
// This is slow and that is ok, transforms should have caching layer in front for mapping transforms
// and export/import semantics with streams and services.
li := len(tr.dtpi) - 1
for i, index := range tr.dtpi {
// <0 means use destination token.
if index < 0 {
token = tr.dtoks[i]
// Break if fwc
if len(token) == 1 && token[0] == fwc {
break
}
} else {
// >= 0 means use source map index to figure out which source token to pull.
token = tokens[index]
}
b.WriteString(token)
if i < li {
b.WriteByte(btsep)
}
}
// We may have more source tokens available. This happens with ">".
if tr.dtoks[len(tr.dtoks)-1] == ">" {
for sli, i := len(tokens)-1, len(tr.stoks)-1; i < len(tokens); i++ {
b.WriteString(tokens[i])
if i < sli {
b.WriteByte(btsep)
}
}
}
return b.String(), nil
}
// Reverse a transform.
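// For example, reversing src "foo.*" -> dest "bar.$1" yields a transform
// from "bar.*" to "foo.$1".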
func (tr *transform) reverse() *transform {
if len(tr.dtpi) == 0 {
rtr, _ := newTransform(tr.dest, tr.src)
return rtr
}
// If we are here we need to dynamically get the correct reverse
// of this transform.
nsrc, phs := transformUntokenize(tr.dest)
var nda []string
for _, token := range tr.stoks {
if token == "*" {
if len(phs) == 0 {
// TODO(dlc) - Should not happen
return nil
}
nda = append(nda, phs[0])
phs = phs[1:]
} else {
nda = append(nda, token)
}
}
ndest := strings.Join(nda, tsep)
rtr, _ := newTransform(nsrc, ndest)
return rtr
}
| 1 | 12,217 | staticcheck flags this: replace with `for k := ..` | nats-io-nats-server | go |
@@ -132,8 +132,8 @@ def GMLAN_InitDiagnostics(
p = GMLAN() / GMLAN_PM(subfunction="enableProgrammingMode")
if verbose:
print("Sending %s" % repr(p))
- sock.send(p)
time.sleep(0.05)
+ sock.sr1(p, timeout=0.001, verbose=False)
return True
return False
| 1 | #! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Markus Schroetter <project.m.schroetter@gmail.com>
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = GMLAN Utilities
# scapy.contrib.status = loads
import time
from scapy.compat import Optional, cast, Callable
from scapy.contrib.automotive.gm.gmlan import GMLAN, GMLAN_SA, GMLAN_RD, \
GMLAN_TD, GMLAN_PM, GMLAN_RMBA
from scapy.config import conf
from scapy.packet import Packet
from scapy.supersocket import SuperSocket
from scapy.contrib.isotp import ISOTPSocket
from scapy.error import warning, log_loading
from scapy.utils import PeriodicSenderThread
__all__ = ["GMLAN_TesterPresentSender", "GMLAN_InitDiagnostics",
"GMLAN_GetSecurityAccess", "GMLAN_RequestDownload",
"GMLAN_TransferData", "GMLAN_TransferPayload",
"GMLAN_ReadMemoryByAddress", "GMLAN_BroadcastSocket"]
log_loading.info("\"conf.contribs['GMLAN']"
"['treat-response-pending-as-answer']\" set to True). This "
"is required by the GMLAN-Utils module to operate "
"correctly.")
try:
conf.contribs['GMLAN']['treat-response-pending-as-answer'] = False
except KeyError:
conf.contribs['GMLAN'] = {'treat-response-pending-as-answer': False}
# Helper function
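# Returns True only if a response was received and it is not a
# negative response (service 0x7f).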
def _check_response(resp, verbose):
# type: (Optional[Packet], Optional[bool]) -> bool
if resp is None:
if verbose:
print("Timeout.")
return False
if verbose:
resp.show()
return resp.service != 0x7f # NegativeResponse
class GMLAN_TesterPresentSender(PeriodicSenderThread):
def __init__(self, sock, pkt=GMLAN(service="TesterPresent"), interval=2):
# type: (SuperSocket, Packet, int) -> None
""" Thread to send GMLAN TesterPresent packets periodically
:param sock: socket where packet is sent periodically
:param pkt: packet to send
:param interval: interval between two packets
"""
PeriodicSenderThread.__init__(self, sock, pkt, interval)
def run(self):
# type: () -> None
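# sr1 is used (rather than a plain send) so any reply to the TesterPresent
# is consumed instead of lingering in the receive queue.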
while not self._stopped.is_set() and not self._socket.closed:
for p in self._pkts:
self._socket.sr1(p, verbose=False, timeout=0.1)
time.sleep(self._interval)
if self._stopped.is_set() or self._socket.closed:
break
def GMLAN_InitDiagnostics(
sock, # type: SuperSocket
broadcast_socket=None, # type: Optional[SuperSocket]
timeout=None, # type: Optional[int]
verbose=None, # type: Optional[bool]
retry=0 # type: int
):
# type: (...) -> bool
""" Send messages to put an ECU into diagnostic/programming state.
:param sock: socket for communication.
:param broadcast_socket: socket for broadcasting. If provided some message
will be sent as broadcast. Recommended when used
on a network with several ECUs.
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level
:param retry: number of retries in case of failure.
:return: True on success else False
"""
# Helper function
def _send_and_check_response(sock, req, timeout, verbose):
# type: (SuperSocket, Packet, Optional[int], Optional[bool]) -> bool
if verbose:
print("Sending %s" % repr(req))
resp = sock.sr1(req, timeout=timeout, verbose=False)
return _check_response(resp, verbose)
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
while retry >= 0:
retry -= 1
# DisableNormalCommunication
p = GMLAN(service="DisableNormalCommunication")
if broadcast_socket is None:
if not _send_and_check_response(sock, p, timeout, verbose):
continue
else:
if verbose:
print("Sending %s as broadcast" % repr(p))
broadcast_socket.send(p)
time.sleep(0.05)
# ReportProgrammedState
p = GMLAN(service="ReportProgrammingState")
if not _send_and_check_response(sock, p, timeout, verbose):
continue
# ProgrammingMode requestProgramming
p = GMLAN() / GMLAN_PM(subfunction="requestProgrammingMode")
if not _send_and_check_response(sock, p, timeout, verbose):
continue
time.sleep(0.05)
# InitiateProgramming enableProgramming
# No response expected
p = GMLAN() / GMLAN_PM(subfunction="enableProgrammingMode")
if verbose:
print("Sending %s" % repr(p))
sock.send(p)
time.sleep(0.05)
return True
return False
def GMLAN_GetSecurityAccess(
sock, # type: SuperSocket
key_function, # type: Callable[[int], int]
level=1, # type: int
timeout=None, # type: Optional[int]
verbose=None, # type: Optional[bool]
retry=0 # type: int
):
# type: (...) -> bool
""" Authenticate on ECU. Implements Seey-Key procedure.
:param sock: socket to send the message on.
:param key_function: function implementing the key algorithm.
:param level: level of access
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level
:param retry: number of retries in case of failure.
:return: True on success.
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
if key_function is None:
return False
if level % 2 == 0:
warning("Parameter Error: Level must be an odd number.")
return False
while retry >= 0:
retry -= 1
request = GMLAN() / GMLAN_SA(subfunction=level)
if verbose:
print("Requesting seed..")
resp = sock.sr1(request, timeout=timeout, verbose=0)
if not _check_response(resp, verbose):
if resp is not None and resp.returnCode == 0x37 and retry:
if verbose:
print("RequiredTimeDelayNotExpired. Wait 10s.")
time.sleep(10)
if verbose:
print("Negative Response.")
continue
seed = cast(Packet, resp).securitySeed
if seed == 0:
if verbose:
print("ECU security already unlocked. (seed is 0x0000)")
return True
keypkt = GMLAN() / GMLAN_SA(subfunction=level + 1,
securityKey=key_function(seed))
if verbose:
print("Responding with key..")
resp = sock.sr1(keypkt, timeout=timeout, verbose=0)
if resp is None:
if verbose:
print("Timeout.")
continue
if verbose:
resp.show()
if resp.service == 0x67:
if verbose:
print("SecurityAccess granted.")
return True
# Invalid Key
elif resp.service == 0x7f and resp.returnCode == 0x35:
if verbose:
print("Key invalid")
continue
return False
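# Example usage sketch for GMLAN_GetSecurityAccess. The key function below is
# a made-up placeholder; real seed-key algorithms are ECU specific:
#
# def my_key_function(seed):
#     # type: (int) -> int
#     return (seed ^ 0x9340) & 0xFFFF
#
# GMLAN_GetSecurityAccess(sock, my_key_function, level=1, timeout=1)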
def GMLAN_RequestDownload(sock, length, timeout=None, verbose=None, retry=0):
# type: (SuperSocket, int, Optional[int], Optional[bool], int) -> bool
""" Send RequestDownload message.
Usually used before calling TransferData.
:param sock: socket to send the message on.
:param length: value for the message's parameter 'unCompressedMemorySize'.
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
while retry >= 0:
# RequestDownload
pkt = GMLAN() / GMLAN_RD(memorySize=length)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
return True
retry -= 1
if retry >= 0 and verbose:
print("Retrying..")
return False
def GMLAN_TransferData(
sock, # type: SuperSocket
addr, # type: int
payload, # type: bytes
maxmsglen=None, # type: Optional[int]
timeout=None, # type: Optional[int]
verbose=None, # type: Optional[bool]
retry=0 # type: int
):
# type: (...) -> bool
""" Send TransferData message.
Usually used after calling RequestDownload.
:param sock: socket to send the message on.
:param addr: destination memory address on the ECU.
:param payload: data to be sent.
:param maxmsglen: maximum length of a single iso-tp message.
default: maximum length
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success.
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
startretry = retry
scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
if addr < 0 or addr >= 2**(8 * scheme):
warning("Error: Invalid address %s for scheme %s",
hex(addr), str(scheme))
return False
# max size of dataRecord according to gmlan protocol
if maxmsglen is None or maxmsglen <= 0 or maxmsglen > (4093 - scheme):
maxmsglen = (4093 - scheme)
maxmsglen = cast(int, maxmsglen)
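# Send the payload in chunks of at most maxmsglen bytes; each chunk is
# retried up to 'retry' times before giving up.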
for i in range(0, len(payload), maxmsglen):
retry = startretry
while True:
if len(payload[i:]) > maxmsglen:
transdata = payload[i:i + maxmsglen]
else:
transdata = payload[i:]
pkt = GMLAN() / GMLAN_TD(startingAddress=addr + i,
dataRecord=transdata)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
break
retry -= 1
if retry >= 0:
if verbose:
print("Retrying..")
else:
return False
return True
def GMLAN_TransferPayload(
sock, # type: SuperSocket
addr, # type: int
payload, # type: bytes
maxmsglen=None, # type: Optional[int]
timeout=None, # type: Optional[int]
verbose=None, # type: Optional[bool]
retry=0 # type: int
):
# type: (...) -> bool
""" Send data by using GMLAN services.
:param sock: socket to send the data on.
:param addr: destination memory address on the ECU.
:param payload: data to be sent.
:param maxmsglen: maximum length of a single iso-tp message.
default: maximum length
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success.
"""
if not GMLAN_RequestDownload(sock, len(payload), timeout=timeout,
verbose=verbose, retry=retry):
return False
if not GMLAN_TransferData(sock, addr, payload, maxmsglen=maxmsglen,
timeout=timeout, verbose=verbose, retry=retry):
return False
return True
def GMLAN_ReadMemoryByAddress(
sock, # type: SuperSocket
addr, # type: int
length, # type: int
timeout=None, # type: Optional[int]
verbose=None, # type: Optional[bool]
retry=0 # type: int
):
# type: (...) -> Optional[bytes]
""" Read data from ECU memory.
:param sock: socket to send the data on.
:param addr: source memory address on the ECU.
:param length: bytes to read.
:param timeout: timeout for sending, receiving or sniffing packets.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: bytes read or None
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
if addr < 0 or addr >= 2**(8 * scheme):
warning("Error: Invalid address %s for scheme %s",
hex(addr), str(scheme))
return None
# max size of dataRecord according to gmlan protocol
if length <= 0 or length > (4094 - scheme):
warning("Error: Invalid length %s for scheme %s. "
"Choose between 0x1 and %s",
hex(length), str(scheme), hex(4094 - scheme))
return None
while retry >= 0:
# RequestDownload
pkt = GMLAN() / GMLAN_RMBA(memoryAddress=addr, memorySize=length)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
return cast(Packet, resp).dataRecord
retry -= 1
if retry >= 0 and verbose:
print("Retrying..")
return None
def GMLAN_BroadcastSocket(interface):
# type: (str) -> SuperSocket
""" Returns a GMLAN broadcast socket using interface.
:param interface: interface name
:return: ISOTPSocket configured as GMLAN Broadcast Socket
"""
return ISOTPSocket(interface, sid=0x101, did=0x0, basecls=GMLAN,
extended_addr=0xfe, padding=True)
| 1 | 19,553 | There's already a sleep right above | secdev-scapy | py |
@@ -1249,9 +1249,16 @@ class AssertionFinder
$if_types[$var_id] = [[$assertion->rule[0][0]]];
}
} elseif (\is_string($assertion->var_id)
- && $expr instanceof PhpParser\Node\Expr\MethodCall
+ && (
+ $expr instanceof PhpParser\Node\Expr\MethodCall
+ || $expr instanceof PhpParser\Node\Expr\StaticCall
+ )
) {
- $if_types[$assertion->var_id] = [[$assertion->rule[0][0]]];
+ $var_id = $assertion->var_id;
+ if (strpos($var_id, 'self::') === 0) {
+ $var_id = $this_class_name . '::' . substr($var_id, 6);
+ }
+ $if_types[$var_id] = [[$assertion->rule[0][0]]];
}
if ($if_types) { | 1 | <?php
namespace Psalm\Internal\Analyzer\Statements\Expression;
use PhpParser;
use Psalm\Codebase;
use Psalm\Internal\Analyzer\ClassLikeAnalyzer;
use Psalm\Internal\Analyzer\StatementsAnalyzer;
use Psalm\Internal\Type\Comparator\UnionTypeComparator;
use Psalm\CodeLocation;
use Psalm\FileSource;
use Psalm\Issue\DocblockTypeContradiction;
use Psalm\Issue\RedundantIdentityWithTrue;
use Psalm\Issue\RedundantCondition;
use Psalm\Issue\RedundantConditionGivenDocblockType;
use Psalm\Issue\TypeDoesNotContainNull;
use Psalm\Issue\TypeDoesNotContainType;
use Psalm\Issue\UnevaluatedCode;
use Psalm\IssueBuffer;
use Psalm\Type;
use function substr;
use function count;
use function strtolower;
use function in_array;
use function array_merge;
use function strpos;
use function is_int;
/**
* @internal
*/
class AssertionFinder
{
public const ASSIGNMENT_TO_RIGHT = 1;
public const ASSIGNMENT_TO_LEFT = -1;
/**
* Gets all the type assertions in a conditional
*
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
public static function scrapeAssertions(
PhpParser\Node\Expr $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase = null,
bool $inside_negation = false,
bool $cache = true,
bool $inside_conditional = true
): array {
$if_types = [];
if ($conditional instanceof PhpParser\Node\Expr\Instanceof_) {
$instanceof_types = self::getInstanceOfTypes($conditional, $this_class_name, $source);
if ($instanceof_types) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->expr,
$this_class_name,
$source
);
if ($var_name) {
$if_types[$var_name] = [$instanceof_types];
$var_type = $source instanceof StatementsAnalyzer
? $source->node_data->getType($conditional->expr)
: null;
foreach ($instanceof_types as $instanceof_type) {
if ($instanceof_type[0] === '=') {
$instanceof_type = substr($instanceof_type, 1);
}
if ($codebase
&& $var_type
&& $inside_negation
&& $source instanceof StatementsAnalyzer
) {
if ($codebase->interfaceExists($instanceof_type)) {
continue;
}
$instanceof_type = Type::parseString(
$instanceof_type,
null,
$source->getTemplateTypeMap() ?: []
);
if (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$instanceof_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
$var_type->getId() . ' does not contain '
. $instanceof_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $instanceof_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type->getId() . ' cannot be identical to '
. $instanceof_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $instanceof_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
}
}
return $if_types ? [$if_types] : [];
}
if ($conditional instanceof PhpParser\Node\Expr\Assign) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->var,
$this_class_name,
$source
);
$candidate_if_types = $inside_conditional
? self::scrapeAssertions(
$conditional->expr,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional
)
: [];
if ($var_name) {
if ($candidate_if_types) {
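// The nested assertions scraped from the assigned expression are carried
// along JSON-encoded behind a '>' prefix.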
$if_types[$var_name] = [['>' . \json_encode($candidate_if_types[0])]];
} else {
$if_types[$var_name] = [['!falsy']];
}
}
return $if_types ? [$if_types] : [];
}
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional,
$this_class_name,
$source
);
if ($var_name) {
$if_types[$var_name] = [['!falsy']];
if (!$conditional instanceof PhpParser\Node\Expr\MethodCall
&& !$conditional instanceof PhpParser\Node\Expr\StaticCall
) {
return [$if_types];
}
}
if ($conditional instanceof PhpParser\Node\Expr\BooleanNot) {
return [];
}
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical ||
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
) {
$and_types = self::scrapeEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
false,
$cache,
$inside_conditional
);
return $and_types;
}
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical ||
$conditional instanceof PhpParser\Node\Expr\BinaryOp\NotEqual
) {
$and_types = self::scrapeInequalityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
false,
$cache,
$inside_conditional
);
return $and_types;
}
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\GreaterOrEqual
) {
$min_count = null;
$count_equality_position = self::hasNonEmptyCountEqualityCheck($conditional, $min_count);
$min_comparison = null;
$positive_number_position = self::hasPositiveNumberCheck($conditional, $min_comparison);
$max_count = null;
$count_inequality_position = self::hasLessThanCountEqualityCheck($conditional, $max_count);
if ($count_equality_position) {
if ($count_equality_position === self::ASSIGNMENT_TO_RIGHT) {
$counted_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$count_equality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $counted_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$counted_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if (self::hasReconcilableNonEmptyCountEqualityCheck($conditional)) {
$if_types[$var_name] = [['non-empty-countable']];
} else {
if ($min_count) {
$if_types[$var_name] = [['=has-at-least-' . $min_count]];
} else {
$if_types[$var_name] = [['=non-empty-countable']];
}
}
}
return $if_types ? [$if_types] : [];
}
if ($count_inequality_position) {
if ($count_inequality_position === self::ASSIGNMENT_TO_LEFT) {
$count_expr = $conditional->right;
} else {
throw new \UnexpectedValueException('$count_inequality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $count_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$count_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if ($max_count) {
$if_types[$var_name] = [['!has-at-least-' . ($max_count + 1)]];
} else {
$if_types[$var_name] = [['!non-empty-countable']];
}
}
return $if_types ? [$if_types] : [];
}
if ($positive_number_position) {
if ($positive_number_position === self::ASSIGNMENT_TO_RIGHT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->left,
$this_class_name,
$source
);
$value_node = $conditional->left;
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->right,
$this_class_name,
$source
);
$value_node = $conditional->right;
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($value_node))
&& $var_type->isSingle()
&& $var_type->hasBool()
&& $min_comparison > 1
) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' cannot be greater than ' . $min_comparison,
new CodeLocation($source, $conditional),
null
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type . ' cannot be greater than ' . $min_comparison,
new CodeLocation($source, $conditional),
null
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
if ($var_name) {
$if_types[$var_name] = [[($min_comparison === 1 ? '' : '=') . 'positive-numeric']];
}
return $if_types ? [$if_types] : [];
}
return [];
}
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\SmallerOrEqual
) {
$min_count = null;
$count_equality_position = self::hasNonEmptyCountEqualityCheck($conditional, $min_count);
$typed_value_position = self::hasTypedValueComparison($conditional, $source);
$max_count = null;
$count_inequality_position = self::hasLessThanCountEqualityCheck($conditional, $max_count);
if ($count_equality_position) {
if ($count_equality_position === self::ASSIGNMENT_TO_LEFT) {
$count_expr = $conditional->right;
} else {
throw new \UnexpectedValueException('$count_equality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $count_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$count_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if ($min_count) {
$if_types[$var_name] = [['=has-at-least-' . $min_count]];
} else {
$if_types[$var_name] = [['=non-empty-countable']];
}
}
return $if_types ? [$if_types] : [];
}
if ($count_inequality_position) {
if ($count_inequality_position === self::ASSIGNMENT_TO_RIGHT) {
$count_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$count_inequality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $count_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$count_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if ($max_count) {
$if_types[$var_name] = [['!has-at-least-' . ($max_count + 1)]];
} else {
$if_types[$var_name] = [['!non-empty-countable']];
}
}
return $if_types ? [$if_types] : [];
}
if ($typed_value_position) {
if ($typed_value_position === self::ASSIGNMENT_TO_RIGHT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->left,
$this_class_name,
$source
);
$expr = $conditional->right;
} elseif ($typed_value_position === self::ASSIGNMENT_TO_LEFT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->right,
$this_class_name,
$source
);
$expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$typed_value_position value');
}
$expr_type = $source instanceof StatementsAnalyzer
? $source->node_data->getType($expr)
: null;
if ($var_name
&& $expr_type
&& $expr_type->isSingleIntLiteral()
&& ($expr_type->getSingleIntLiteral()->value === 0)
) {
$if_types[$var_name] = [['=isset']];
}
return $if_types ? [$if_types] : [];
}
return [];
}
if ($conditional instanceof PhpParser\Node\Expr\FuncCall) {
$and_types = self::processFunctionCall(
$conditional,
$this_class_name,
$source,
$codebase,
$inside_negation
);
return $and_types;
}
if ($conditional instanceof PhpParser\Node\Expr\MethodCall
|| $conditional instanceof PhpParser\Node\Expr\StaticCall
) {
$custom_assertions = self::processCustomAssertion($conditional, $this_class_name, $source);
if ($custom_assertions) {
return $custom_assertions;
}
return $if_types ? [$if_types] : [];
}
if ($conditional instanceof PhpParser\Node\Expr\Empty_) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->expr,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional->expr instanceof PhpParser\Node\Expr\Variable
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($conditional->expr))
&& !$var_type->isMixed()
&& !$var_type->possibly_undefined
) {
$if_types[$var_name] = [['falsy']];
} else {
$if_types[$var_name] = [['empty']];
}
}
return $if_types ? [$if_types] : [];
}
if ($conditional instanceof PhpParser\Node\Expr\Isset_) {
foreach ($conditional->vars as $isset_var) {
$var_name = ExpressionIdentifier::getArrayVarId(
$isset_var,
$this_class_name,
$source
);
if ($var_name) {
if ($isset_var instanceof PhpParser\Node\Expr\Variable
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($isset_var))
&& !$var_type->isMixed()
&& !$var_type->possibly_undefined
&& !$var_type->possibly_undefined_from_try
&& $var_name !== '$_SESSION'
) {
$if_types[$var_name] = [['!null']];
} else {
$if_types[$var_name] = [['isset']];
}
} else {
// look for any variables we *can* use for an isset assertion
$array_root = $isset_var;
while ($array_root instanceof PhpParser\Node\Expr\ArrayDimFetch && !$var_name) {
$array_root = $array_root->var;
$var_name = ExpressionIdentifier::getArrayVarId(
$array_root,
$this_class_name,
$source
);
}
if ($var_name) {
$if_types[$var_name] = [['=isset']];
}
}
}
return $if_types ? [$if_types] : [];
}
return [];
}
/**
* @param PhpParser\Node\Expr\BinaryOp\Identical|PhpParser\Node\Expr\BinaryOp $conditional
*
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function scrapeEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase = null,
bool $inside_negation = false,
bool $cache = true,
bool $inside_conditional = true
): array {
$null_position = self::hasNullVariable($conditional, $source);
if ($null_position !== null) {
return self::getNullEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$null_position
);
}
$true_position = self::hasTrueVariable($conditional);
if ($true_position) {
return self::getTrueEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$true_position
);
}
$false_position = self::hasFalseVariable($conditional);
if ($false_position) {
return self::getFalseEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional,
$false_position
);
}
$empty_array_position = self::hasEmptyArrayVariable($conditional);
if ($empty_array_position !== null) {
return self::getEmptyArrayEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$empty_array_position
);
}
$gettype_position = self::hasGetTypeCheck($conditional);
if ($gettype_position) {
return self::getGettypeEqualityAssertions(
$conditional,
$this_class_name,
$source,
$gettype_position
);
}
$get_debug_type_position = self::hasGetDebugTypeCheck($conditional);
if ($get_debug_type_position) {
return self::getGetdebugtypeEqualityAssertions(
$conditional,
$this_class_name,
$source,
$get_debug_type_position
);
}
$min_count = null;
$count_equality_position = self::hasNonEmptyCountEqualityCheck($conditional, $min_count);
if ($count_equality_position) {
$if_types = [];
if ($count_equality_position === self::ASSIGNMENT_TO_RIGHT) {
$count_expr = $conditional->left;
} elseif ($count_equality_position === self::ASSIGNMENT_TO_LEFT) {
$count_expr = $conditional->right;
} else {
throw new \UnexpectedValueException('$count_equality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $count_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$count_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if ($min_count) {
$if_types[$var_name] = [['=has-at-least-' . $min_count]];
} else {
$if_types[$var_name] = [['=non-empty-countable']];
}
}
return $if_types ? [$if_types] : [];
}
if (!$source instanceof StatementsAnalyzer) {
return [];
}
$getclass_position = self::hasGetClassCheck($conditional, $source);
if ($getclass_position) {
return self::getGetclassEqualityAssertions(
$conditional,
$this_class_name,
$source,
$getclass_position
);
}
$typed_value_position = self::hasTypedValueComparison($conditional, $source);
if ($typed_value_position) {
return self::getTypedValueEqualityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$typed_value_position
);
}
$var_type = $source->node_data->getType($conditional->left);
$other_type = $source->node_data->getType($conditional->right);
if ($codebase
&& $var_type
&& $other_type
&& $conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
) {
if (!UnionTypeComparator::canExpressionTypesBeIdentical($codebase, $var_type, $other_type)) {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type->getId() . ' cannot be identical to ' . $other_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $other_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
return [];
}
/**
* @param PhpParser\Node\Expr\BinaryOp\NotIdentical|PhpParser\Node\Expr\BinaryOp $conditional
*
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function scrapeInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase = null,
bool $inside_negation = false,
bool $cache = true,
bool $inside_conditional = true
): array {
$null_position = self::hasNullVariable($conditional, $source);
if ($null_position !== null) {
return self::getNullInequalityAssertions(
$conditional,
$source,
$this_class_name,
$codebase,
$null_position
);
}
$false_position = self::hasFalseVariable($conditional);
if ($false_position) {
return self::getFalseInequalityAssertions(
$conditional,
$cache,
$this_class_name,
$source,
$inside_conditional,
$codebase,
$inside_negation,
$false_position
);
}
$true_position = self::hasTrueVariable($conditional);
if ($true_position) {
return self::getTrueInequalityAssertions(
$true_position,
$conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional
);
}
$count = null;
$count_inequality_position = self::hasNotCountEqualityCheck($conditional, $count);
if ($count_inequality_position) {
$if_types = [];
if ($count_inequality_position === self::ASSIGNMENT_TO_RIGHT) {
$count_expr = $conditional->left;
} elseif ($count_inequality_position === self::ASSIGNMENT_TO_LEFT) {
$count_expr = $conditional->right;
} else {
throw new \UnexpectedValueException('$count_inequality_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $count_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$count_expr->args[0]->value,
$this_class_name,
$source
);
if ($var_name) {
if ($count) {
$if_types[$var_name] = [['!has-exactly-' . $count]];
} else {
$if_types[$var_name] = [['non-empty-countable']];
}
}
return $if_types ? [$if_types] : [];
}
$empty_array_position = self::hasEmptyArrayVariable($conditional);
if ($empty_array_position !== null) {
return self::getEmptyInequalityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$empty_array_position
);
}
$gettype_position = self::hasGetTypeCheck($conditional);
if ($gettype_position) {
return self::getGettypeInequalityAssertions(
$conditional,
$this_class_name,
$source,
$gettype_position
);
}
$get_debug_type_position = self::hasGetDebugTypeCheck($conditional);
if ($get_debug_type_position) {
return self::getGetdebugTypeInequalityAssertions(
$conditional,
$this_class_name,
$source,
$get_debug_type_position
);
}
if (!$source instanceof StatementsAnalyzer) {
return [];
}
$getclass_position = self::hasGetClassCheck($conditional, $source);
if ($getclass_position) {
return self::getGetclassInequalityAssertions(
$conditional,
$this_class_name,
$source,
$getclass_position
);
}
$typed_value_position = self::hasTypedValueComparison($conditional, $source);
if ($typed_value_position) {
return self::getTypedValueInequalityAssertions(
$conditional,
$this_class_name,
$source,
$codebase,
$typed_value_position
);
}
return [];
}
/**
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
public static function processFunctionCall(
PhpParser\Node\Expr\FuncCall $expr,
?string $this_class_name,
FileSource $source,
?Codebase $codebase = null,
bool $negate = false
): array {
$first_var_name = isset($expr->args[0]->value)
? ExpressionIdentifier::getArrayVarId(
$expr->args[0]->value,
$this_class_name,
$source
)
: null;
$if_types = [];
$first_var_type = isset($expr->args[0]->value)
&& $source instanceof StatementsAnalyzer
? $source->node_data->getType($expr->args[0]->value)
: null;
if (self::hasNullCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['null']];
}
} elseif ($source instanceof StatementsAnalyzer && self::hasIsACheck($expr, $source)) {
return self::getIsaAssertions($expr, $source, $this_class_name, $first_var_name);
} elseif (self::hasArrayCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['array']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getArray(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasBoolCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['bool']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getBool(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasStringCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['string']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getString(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasObjectCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['object']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getObject(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasNumericCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['numeric']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getNumeric(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasIntCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['int']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getInt(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasFloatCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['float']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getFloat(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasResourceCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['resource']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getResource(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasScalarCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['scalar']];
} elseif ($first_var_type
&& $codebase
&& $source instanceof StatementsAnalyzer
) {
self::processIrreconcilableFunctionCall(
$first_var_type,
Type::getScalar(),
$expr,
$source,
$codebase,
$negate
);
}
} elseif (self::hasCallableCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['callable']];
} elseif ($expr->args[0]->value instanceof PhpParser\Node\Expr\Array_
&& isset($expr->args[0]->value->items[0], $expr->args[0]->value->items[1])
&& $expr->args[0]->value->items[1]->value instanceof PhpParser\Node\Scalar\String_
) {
$first_var_name_in_array_argument = ExpressionIdentifier::getArrayVarId(
$expr->args[0]->value->items[0]->value,
$this_class_name,
$source
);
if ($first_var_name_in_array_argument) {
$if_types[$first_var_name_in_array_argument] = [
['hasmethod-' . $expr->args[0]->value->items[1]->value->value]
];
}
}
} elseif (self::hasIterableCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['iterable']];
}
} elseif (self::hasCountableCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['countable']];
}
} elseif ($class_exists_check_type = self::hasClassExistsCheck($expr)) {
if ($first_var_name) {
$class_string_type = ($class_exists_check_type === 1 ? 'loaded-' : '') . 'class-string';
$if_types[$first_var_name] = [[$class_string_type]];
}
} elseif ($class_exists_check_type = self::hasTraitExistsCheck($expr)) {
if ($first_var_name) {
if ($class_exists_check_type === 2) {
$if_types[$first_var_name] = [['trait-string']];
} else {
$if_types[$first_var_name] = [['=trait-string']];
}
}
} elseif (self::hasInterfaceExistsCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['interface-string']];
}
} elseif (self::hasFunctionExistsCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['callable-string']];
}
} elseif ($expr->name instanceof PhpParser\Node\Name
&& strtolower($expr->name->parts[0]) === 'method_exists'
&& isset($expr->args[1])
&& $expr->args[1]->value instanceof PhpParser\Node\Scalar\String_
) {
if ($first_var_name) {
$if_types[$first_var_name] = [['hasmethod-' . $expr->args[1]->value->value]];
}
} elseif (self::hasInArrayCheck($expr) && $source instanceof StatementsAnalyzer) {
return self::getInarrayAssertions($expr, $source, $first_var_name);
} elseif (self::hasArrayKeyExistsCheck($expr)) {
return self::getArrayKeyExistsAssertions(
$expr,
$first_var_type,
$first_var_name,
$source,
$this_class_name
);
} elseif (self::hasNonEmptyCountCheck($expr)) {
if ($first_var_name) {
$if_types[$first_var_name] = [['non-empty-countable']];
}
} else {
return self::processCustomAssertion($expr, $this_class_name, $source);
}
return $if_types ? [$if_types] : [];
}
private static function processIrreconcilableFunctionCall(
Type\Union $first_var_type,
Type\Union $expected_type,
PhpParser\Node\Expr $expr,
StatementsAnalyzer $source,
Codebase $codebase,
bool $negate
) : void {
if ($first_var_type->hasMixed()) {
return;
}
if (!UnionTypeComparator::isContainedBy(
$codebase,
$first_var_type,
$expected_type
)) {
return;
}
if (!$negate) {
if ($first_var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
'Docblock type ' . $first_var_type . ' always contains ' . $expected_type,
new CodeLocation($source, $expr),
$first_var_type . ' ' . $expected_type
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$first_var_type . ' always contains ' . $expected_type,
new CodeLocation($source, $expr),
$first_var_type . ' ' . $expected_type
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
} else {
if ($first_var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$first_var_type . ' does not contain ' . $expected_type,
new CodeLocation($source, $expr),
$first_var_type . ' ' . $expected_type
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$first_var_type . ' does not contain ' . $expected_type,
new CodeLocation($source, $expr),
$first_var_type . ' ' . $expected_type
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
/**
* @param PhpParser\Node\Expr\FuncCall|PhpParser\Node\Expr\MethodCall|PhpParser\Node\Expr\StaticCall $expr
*
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
protected static function processCustomAssertion(
PhpParser\Node\Expr $expr,
?string $this_class_name,
FileSource $source
): array {
if (!$source instanceof StatementsAnalyzer) {
return [];
}
$if_true_assertions = $source->node_data->getIfTrueAssertions($expr);
$if_false_assertions = $source->node_data->getIfFalseAssertions($expr);
if ($if_true_assertions === null && $if_false_assertions === null) {
return [];
}
$first_var_name = isset($expr->args[0]->value)
? ExpressionIdentifier::getArrayVarId(
$expr->args[0]->value,
$this_class_name,
$source
)
: null;
$anded_types = [];
if ($if_true_assertions) {
foreach ($if_true_assertions as $assertion) {
$if_types = [];
$assertion = clone $assertion;
foreach ($assertion->rule as $i => $and_rules) {
foreach ($and_rules as $j => $rule) {
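// Expand "scalar-class-constant(...)" placeholders to the type id of the
// resolved constant before using the rule.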
if (strpos($rule, 'scalar-class-constant(') === 0) {
$codebase = $source->getCodebase();
$assertion->rule[$i][$j] = \Psalm\Internal\Type\TypeExpander::expandUnion(
$codebase,
Type::parseString(substr($rule, 22, -1)),
null,
null,
null
)->getId();
}
}
}
if (is_int($assertion->var_id) && isset($expr->args[$assertion->var_id])) {
if ($assertion->var_id === 0) {
$var_name = $first_var_name;
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$expr->args[$assertion->var_id]->value,
$this_class_name,
$source
);
}
if ($var_name) {
$if_types[$var_name] = [[$assertion->rule[0][0]]];
}
} elseif ($assertion->var_id === '$this' && $expr instanceof PhpParser\Node\Expr\MethodCall) {
$var_id = ExpressionIdentifier::getArrayVarId(
$expr->var,
$this_class_name,
$source
);
if ($var_id) {
$if_types[$var_id] = [[$assertion->rule[0][0]]];
}
} elseif (\is_string($assertion->var_id)
&& $expr instanceof PhpParser\Node\Expr\MethodCall
) {
$if_types[$assertion->var_id] = [[$assertion->rule[0][0]]];
}
if ($if_types) {
$anded_types[] = $if_types;
}
}
}
if ($if_false_assertions) {
foreach ($if_false_assertions as $assertion) {
$if_types = [];
$assertion = clone $assertion;
foreach ($assertion->rule as $i => $and_rules) {
foreach ($and_rules as $j => $rule) {
if (strpos($rule, 'scalar-class-constant(') === 0) {
$codebase = $source->getCodebase();
$assertion->rule[$i][$j] = \Psalm\Internal\Type\TypeExpander::expandUnion(
$codebase,
Type::parseString(substr($rule, 22, -1)),
null,
null,
null
)->getId();
}
}
}
if (is_int($assertion->var_id) && isset($expr->args[$assertion->var_id])) {
if ($assertion->var_id === 0) {
$var_name = $first_var_name;
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$expr->args[$assertion->var_id]->value,
$this_class_name,
$source
);
}
if ($var_name) {
if ('!' === $assertion->rule[0][0][0]) {
$if_types[$var_name] = [[substr($assertion->rule[0][0], 1)]];
} else {
$if_types[$var_name] = [['!' . $assertion->rule[0][0]]];
}
}
} elseif ($assertion->var_id === '$this' && $expr instanceof PhpParser\Node\Expr\MethodCall) {
$var_id = ExpressionIdentifier::getArrayVarId(
$expr->var,
$this_class_name,
$source
);
if ($var_id) {
if ('!' === $assertion->rule[0][0][0]) {
$if_types[$var_id] = [[substr($assertion->rule[0][0], 1)]];
} else {
$if_types[$var_id] = [['!' . $assertion->rule[0][0]]];
}
}
} elseif (\is_string($assertion->var_id)
&& $expr instanceof PhpParser\Node\Expr\MethodCall
) {
$if_types[$assertion->var_id] = [['!' . $assertion->rule[0][0]]];
}
if ($if_types) {
$anded_types[] = $if_types;
}
}
}
return $anded_types;
}
/**
* @return list<string>
*/
protected static function getInstanceOfTypes(
PhpParser\Node\Expr\Instanceof_ $stmt,
?string $this_class_name,
FileSource $source
): array {
if ($stmt->class instanceof PhpParser\Node\Name) {
if (!in_array(strtolower($stmt->class->parts[0]), ['self', 'static', 'parent'], true)) {
$instanceof_class = ClassLikeAnalyzer::getFQCLNFromNameObject(
$stmt->class,
$source->getAliases()
);
if ($source instanceof StatementsAnalyzer) {
$codebase = $source->getCodebase();
$instanceof_class = $codebase->classlikes->getUnAliasedName($instanceof_class);
}
return [$instanceof_class];
} elseif ($this_class_name
&& (in_array(strtolower($stmt->class->parts[0]), ['self', 'static'], true))
) {
if ($stmt->class->parts[0] === 'static') {
return ['=' . $this_class_name . '&static'];
}
return [$this_class_name];
}
} elseif ($source instanceof StatementsAnalyzer) {
$stmt_class_type = $source->node_data->getType($stmt->class);
if ($stmt_class_type) {
$literal_class_strings = [];
foreach ($stmt_class_type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof Type\Atomic\TLiteralClassString) {
$literal_class_strings[] = $atomic_type->value;
} elseif ($atomic_type instanceof Type\Atomic\TTemplateParamClass) {
$literal_class_strings[] = $atomic_type->param_name;
}
}
return $literal_class_strings;
}
}
return [];
}
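/**
* Returns ASSIGNMENT_TO_RIGHT when the right-hand side of the comparison
* is the `null` constant (or an expression inferred as null),
* ASSIGNMENT_TO_LEFT when the left-hand side is `null`, or null otherwise.
*/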
protected static function hasNullVariable(
PhpParser\Node\Expr\BinaryOp $conditional,
FileSource $source
): ?int {
if ($conditional->right instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->right->name->parts[0]) === 'null'
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->left->name->parts[0]) === 'null'
) {
return self::ASSIGNMENT_TO_LEFT;
}
if ($source instanceof StatementsAnalyzer
&& ($right_type = $source->node_data->getType($conditional->right))
&& $right_type->isNull()
) {
return self::ASSIGNMENT_TO_RIGHT;
}
return null;
}
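/**
* Returns which side of the comparison is the `false` constant, if either.
*/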
public static function hasFalseVariable(
PhpParser\Node\Expr\BinaryOp $conditional
): ?int {
if ($conditional->right instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->right->name->parts[0]) === 'false'
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->left->name->parts[0]) === 'false'
) {
return self::ASSIGNMENT_TO_LEFT;
}
return null;
}
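/**
* Returns which side of the comparison is the `true` constant, if either.
*/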
public static function hasTrueVariable(
PhpParser\Node\Expr\BinaryOp $conditional
): ?int {
if ($conditional->right instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->right->name->parts[0]) === 'true'
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($conditional->left->name->parts[0]) === 'true'
) {
return self::ASSIGNMENT_TO_LEFT;
}
return null;
}
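/**
* Returns which side of the comparison is an empty array literal, if either.
*/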
protected static function hasEmptyArrayVariable(
PhpParser\Node\Expr\BinaryOp $conditional
): ?int {
if ($conditional->right instanceof PhpParser\Node\Expr\Array_
&& !$conditional->right->items
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\Array_
&& !$conditional->left->items
) {
return self::ASSIGNMENT_TO_LEFT;
}
return null;
}
/**
* Detects `gettype($x) === 'string'`-style comparisons.
*
* @return false|int
*/
protected static function hasGetTypeCheck(
PhpParser\Node\Expr\BinaryOp $conditional
) {
if ($conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'gettype'
&& $conditional->right->args
&& $conditional->left instanceof PhpParser\Node\Scalar\String_
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'gettype'
&& $conditional->left->args
&& $conditional->right instanceof PhpParser\Node\Scalar\String_
) {
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `get_debug_type($x) === Foo::class`-style comparisons.
*
* @return false|int
*/
protected static function hasGetDebugTypeCheck(
PhpParser\Node\Expr\BinaryOp $conditional
) {
if ($conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'get_debug_type'
&& $conditional->right->args
&& ($conditional->left instanceof PhpParser\Node\Scalar\String_
|| $conditional->left instanceof PhpParser\Node\Expr\ClassConstFetch)
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if ($conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'get_debug_type'
&& $conditional->left->args
&& ($conditional->right instanceof PhpParser\Node\Scalar\String_
|| $conditional->right instanceof PhpParser\Node\Expr\ClassConstFetch)
) {
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `get_class($x) === Foo::class`-style comparisons.
*
* @return false|int
*/
protected static function hasGetClassCheck(
PhpParser\Node\Expr\BinaryOp $conditional,
FileSource $source
) {
if (!$source instanceof StatementsAnalyzer) {
return false;
}
$right_get_class = $conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'get_class';
$right_static_class = $conditional->right instanceof PhpParser\Node\Expr\ClassConstFetch
&& $conditional->right->class instanceof PhpParser\Node\Name
&& $conditional->right->class->parts === ['static']
&& $conditional->right->name instanceof PhpParser\Node\Identifier
&& strtolower($conditional->right->name->name) === 'class';
$left_class_string = $conditional->left instanceof PhpParser\Node\Expr\ClassConstFetch
&& $conditional->left->class instanceof PhpParser\Node\Name
&& $conditional->left->name instanceof PhpParser\Node\Identifier
&& strtolower($conditional->left->name->name) === 'class';
$left_type = $source->node_data->getType($conditional->left);
$left_class_string_t = false;
if ($left_type && $left_type->isSingle()) {
foreach ($left_type->getAtomicTypes() as $type_part) {
if ($type_part instanceof Type\Atomic\TClassString) {
$left_class_string_t = true;
break;
}
}
}
if (($right_get_class || $right_static_class) && ($left_class_string || $left_class_string_t)) {
return self::ASSIGNMENT_TO_RIGHT;
}
$left_get_class = $conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'get_class';
$left_static_class = $conditional->left instanceof PhpParser\Node\Expr\ClassConstFetch
&& $conditional->left->class instanceof PhpParser\Node\Name
&& $conditional->left->class->parts === ['static']
&& $conditional->left->name instanceof PhpParser\Node\Identifier
&& strtolower($conditional->left->name->name) === 'class';
$right_class_string = $conditional->right instanceof PhpParser\Node\Expr\ClassConstFetch
&& $conditional->right->class instanceof PhpParser\Node\Name
&& $conditional->right->name instanceof PhpParser\Node\Identifier
&& strtolower($conditional->right->name->name) === 'class';
$right_type = $source->node_data->getType($conditional->right);
$right_class_string_t = false;
if ($right_type && $right_type->isSingle()) {
foreach ($right_type->getAtomicTypes() as $type_part) {
if ($type_part instanceof Type\Atomic\TClassString) {
$right_class_string_t = true;
break;
}
}
}
if (($left_get_class || $left_static_class) && ($right_class_string || $right_class_string_t)) {
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `count($x) > 0`-style comparisons, writing the implied
* minimum element count to $min_count.
*
* @return false|int
*/
protected static function hasNonEmptyCountEqualityCheck(
PhpParser\Node\Expr\BinaryOp $conditional,
?int &$min_count
) {
$left_count = $conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'count'
&& $conditional->left->args;
$operator_greater_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\GreaterOrEqual;
if ($left_count
&& $conditional->right instanceof PhpParser\Node\Scalar\LNumber
&& $operator_greater_than_or_equal
&& $conditional->right->value >= (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
? 0
: 1
)
) {
$min_count = $conditional->right->value +
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater ? 1 : 0);
return self::ASSIGNMENT_TO_RIGHT;
}
$right_count = $conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'count'
&& $conditional->right->args;
$operator_less_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\SmallerOrEqual;
if ($right_count
&& $conditional->left instanceof PhpParser\Node\Scalar\LNumber
&& $operator_less_than_or_equal
&& $conditional->left->value >= (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 0 : 1
)
) {
$min_count = $conditional->left->value +
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 1 : 0);
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `count($x) < N`-style comparisons, writing the implied
* maximum element count to $max_count.
*
* @return false|int
*/
protected static function hasLessThanCountEqualityCheck(
PhpParser\Node\Expr\BinaryOp $conditional,
?int &$max_count
) {
$left_count = $conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'count'
&& $conditional->left->args;
$operator_less_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\SmallerOrEqual
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller;
if ($left_count
&& $operator_less_than_or_equal
&& $conditional->right instanceof PhpParser\Node\Scalar\LNumber
) {
$max_count = $conditional->right->value -
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 1 : 0);
return self::ASSIGNMENT_TO_RIGHT;
}
$right_count = $conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'count'
&& $conditional->right->args;
$operator_greater_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\GreaterOrEqual
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater;
if ($right_count
&& $operator_greater_than_or_equal
&& $conditional->left instanceof PhpParser\Node\Scalar\LNumber
) {
$max_count = $conditional->left->value -
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater ? 1 : 0);
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `count($x) !== N`-style comparisons, writing the compared
* count to $count. The caller is expected to pass a NotIdentical node.
*
* @return false|int
*/
protected static function hasNotCountEqualityCheck(
PhpParser\Node\Expr\BinaryOp $conditional,
?int &$count
) {
$left_count = $conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'count'
&& $conditional->left->args;
if ($left_count && $conditional->right instanceof PhpParser\Node\Scalar\LNumber) {
$count = $conditional->right->value;
return self::ASSIGNMENT_TO_RIGHT;
}
$right_count = $conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'count'
&& $conditional->right->args;
if ($right_count && $conditional->left instanceof PhpParser\Node\Scalar\LNumber) {
$count = $conditional->left->value;
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `$x > 0`-style comparisons against integer literals, writing
* the implied minimum value to $min_count.
*
* @return false|int
*/
protected static function hasPositiveNumberCheck(
PhpParser\Node\Expr\BinaryOp $conditional,
?int &$min_count
) {
$operator_greater_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\GreaterOrEqual;
if ($conditional->right instanceof PhpParser\Node\Scalar\LNumber
&& $operator_greater_than_or_equal
&& $conditional->right->value >= (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
? 0
: 1
)
) {
$min_count = $conditional->right->value +
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater ? 1 : 0);
return self::ASSIGNMENT_TO_RIGHT;
}
$operator_less_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\SmallerOrEqual;
if ($conditional->left instanceof PhpParser\Node\Scalar\LNumber
&& $operator_less_than_or_equal
&& $conditional->left->value >= (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 0 : 1
)
) {
$min_count = $conditional->left->value +
($conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 1 : 0);
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects `count($x) > 0`, `count($x) >= 1` and similar forms whose only
* implication is that the countable is non-empty.
*
* @return false|int
*/
protected static function hasReconcilableNonEmptyCountEqualityCheck(
PhpParser\Node\Expr\BinaryOp $conditional
) {
$left_count = $conditional->left instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->left->name instanceof PhpParser\Node\Name
&& strtolower($conditional->left->name->parts[0]) === 'count';
$right_number = $conditional->right instanceof PhpParser\Node\Scalar\LNumber
&& $conditional->right->value === (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater ? 0 : 1);
$operator_greater_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Greater
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\GreaterOrEqual;
if ($left_count && $right_number && $operator_greater_than_or_equal) {
return self::ASSIGNMENT_TO_RIGHT;
}
$right_count = $conditional->right instanceof PhpParser\Node\Expr\FuncCall
&& $conditional->right->name instanceof PhpParser\Node\Name
&& strtolower($conditional->right->name->parts[0]) === 'count';
$left_number = $conditional->left instanceof PhpParser\Node\Scalar\LNumber
&& $conditional->left->value === (
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller ? 0 : 1);
$operator_less_than_or_equal =
$conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Equal
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\Smaller
|| $conditional instanceof PhpParser\Node\Expr\BinaryOp\SmallerOrEqual;
if ($right_count && $left_number && $operator_less_than_or_equal) {
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
/**
* Detects comparisons where one side has a single known (non-mixed)
* type that can be turned into an assertion on the other side.
*
* @return false|int
*/
protected static function hasTypedValueComparison(
PhpParser\Node\Expr\BinaryOp $conditional,
FileSource $source
) {
if (!$source instanceof StatementsAnalyzer) {
return false;
}
if (($right_type = $source->node_data->getType($conditional->right))
&& ((!$conditional->right instanceof PhpParser\Node\Expr\Variable
&& !$conditional->right instanceof PhpParser\Node\Expr\PropertyFetch
&& !$conditional->right instanceof PhpParser\Node\Expr\StaticPropertyFetch)
|| $conditional->left instanceof PhpParser\Node\Expr\Variable
|| $conditional->left instanceof PhpParser\Node\Expr\PropertyFetch
|| $conditional->left instanceof PhpParser\Node\Expr\StaticPropertyFetch)
&& count($right_type->getAtomicTypes()) === 1
&& !$right_type->hasMixed()
) {
return self::ASSIGNMENT_TO_RIGHT;
}
if (($left_type = $source->node_data->getType($conditional->left))
&& !$conditional->left instanceof PhpParser\Node\Expr\Variable
&& !$conditional->left instanceof PhpParser\Node\Expr\PropertyFetch
&& !$conditional->left instanceof PhpParser\Node\Expr\StaticPropertyFetch
&& count($left_type->getAtomicTypes()) === 1
&& !$left_type->hasMixed()
) {
return self::ASSIGNMENT_TO_LEFT;
}
return false;
}
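/**
* Whether the function call is is_null().
*/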
protected static function hasNullCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_null') {
return true;
}
return false;
}
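/**
* Whether the call is is_a() or is_subclass_of() with a usable
* class-name second argument.
*/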
protected static function hasIsACheck(
PhpParser\Node\Expr\FuncCall $stmt,
StatementsAnalyzer $source
): bool {
if ($stmt->name instanceof PhpParser\Node\Name
&& (strtolower($stmt->name->parts[0]) === 'is_a'
|| strtolower($stmt->name->parts[0]) === 'is_subclass_of')
&& isset($stmt->args[1])
) {
$second_arg = $stmt->args[1]->value;
if ($second_arg instanceof PhpParser\Node\Scalar\String_
|| (
$second_arg instanceof PhpParser\Node\Expr\ClassConstFetch
&& $second_arg->class instanceof PhpParser\Node\Name
&& $second_arg->name instanceof PhpParser\Node\Identifier
&& strtolower($second_arg->name->name) === 'class'
)
|| (($second_arg_type = $source->node_data->getType($second_arg))
&& $second_arg_type->hasString())
) {
return true;
}
}
return false;
}
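// The helpers below each detect a call to a single type-checking builtin
// (is_array(), is_string(), is_bool(), ...).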
protected static function hasArrayCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_array') {
return true;
}
return false;
}
protected static function hasStringCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_string') {
return true;
}
return false;
}
protected static function hasBoolCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_bool') {
return true;
}
return false;
}
protected static function hasObjectCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['is_object']) {
return true;
}
return false;
}
protected static function hasNumericCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['is_numeric']) {
return true;
}
return false;
}
protected static function hasIterableCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_iterable') {
return true;
}
return false;
}
protected static function hasCountableCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'is_countable') {
return true;
}
return false;
}
/**
* @return 0|1|2 0 if not a class_exists() call, 2 if autoloading is
*         allowed (no second argument, or second argument === true),
*         1 otherwise
*/
protected static function hasClassExistsCheck(PhpParser\Node\Expr\FuncCall $stmt): int
{
if ($stmt->name instanceof PhpParser\Node\Name
&& strtolower($stmt->name->parts[0]) === 'class_exists'
) {
if (!isset($stmt->args[1])) {
return 2;
}
$second_arg = $stmt->args[1]->value;
if ($second_arg instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($second_arg->name->parts[0]) === 'true'
) {
return 2;
}
return 1;
}
return 0;
}
/**
* @return 0|1|2 0 if not a trait_exists() call, 2 if autoloading is
*         allowed (no second argument, or second argument === true),
*         1 otherwise
*/
protected static function hasTraitExistsCheck(PhpParser\Node\Expr\FuncCall $stmt): int
{
if ($stmt->name instanceof PhpParser\Node\Name
&& strtolower($stmt->name->parts[0]) === 'trait_exists'
) {
if (!isset($stmt->args[1])) {
return 2;
}
$second_arg = $stmt->args[1]->value;
if ($second_arg instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($second_arg->name->parts[0]) === 'true'
) {
return 2;
}
return 1;
}
return 0;
}
protected static function hasInterfaceExistsCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name
&& strtolower($stmt->name->parts[0]) === 'interface_exists'
) {
return true;
}
return false;
}
protected static function hasFunctionExistsCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && strtolower($stmt->name->parts[0]) === 'function_exists') {
return true;
}
return false;
}
protected static function hasIntCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name &&
($stmt->name->parts === ['is_int'] ||
$stmt->name->parts === ['is_integer'] ||
$stmt->name->parts === ['is_long'])
) {
return true;
}
return false;
}
protected static function hasFloatCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name &&
($stmt->name->parts === ['is_float'] ||
$stmt->name->parts === ['is_real'] ||
$stmt->name->parts === ['is_double'])
) {
return true;
}
return false;
}
protected static function hasResourceCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['is_resource']) {
return true;
}
return false;
}
protected static function hasScalarCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['is_scalar']) {
return true;
}
return false;
}
protected static function hasCallableCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['is_callable']) {
return true;
}
return false;
}
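/**
* Whether the call is in_array() with the strict flag set to true.
*/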
protected static function hasInArrayCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name
&& $stmt->name->parts === ['in_array']
&& isset($stmt->args[2])
) {
$second_arg = $stmt->args[2]->value;
if ($second_arg instanceof PhpParser\Node\Expr\ConstFetch
&& strtolower($second_arg->name->parts[0]) === 'true'
) {
return true;
}
}
return false;
}
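/**
* Whether the call is count(), used directly as a truthiness check.
*/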
protected static function hasNonEmptyCountCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name
&& $stmt->name->parts === ['count']
) {
return true;
}
return false;
}
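/**
* Whether the call is array_key_exists().
*/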
protected static function hasArrayKeyExistsCheck(PhpParser\Node\Expr\FuncCall $stmt): bool
{
if ($stmt->name instanceof PhpParser\Node\Name && $stmt->name->parts === ['array_key_exists']) {
return true;
}
return false;
}
/**
* @param int $null_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getNullInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
FileSource $source,
?string $this_class_name,
?Codebase $codebase,
int $null_position
): array {
$if_types = [];
if ($null_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($null_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('Bad null variable position');
}
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($base_conditional instanceof PhpParser\Node\Expr\Assign) {
$var_name = '=' . $var_name;
}
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$if_types[$var_name] = [['!null']];
} else {
$if_types[$var_name] = [['!falsy']];
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$null_type = Type::getNull();
if (!UnionTypeComparator::isContainedBy(
$codebase,
$var_type,
$null_type
) && !UnionTypeComparator::isContainedBy(
$codebase,
$null_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
'Docblock-defined type ' . $var_type . ' can never contain null',
new CodeLocation($source, $conditional),
$var_type->getId() . ' null'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type . ' can never contain null',
new CodeLocation($source, $conditional),
$var_type->getId() . ' null'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $false_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getFalseInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
bool $cache,
?string $this_class_name,
FileSource $source,
bool $inside_conditional,
?Codebase $codebase,
bool $inside_negation,
int $false_position
): array {
$if_types = [];
if ($false_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($false_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('Bad false variable position');
}
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$if_types[$var_name] = [['!false']];
} else {
$if_types[$var_name] = [['!falsy']];
}
$if_types = [$if_types];
} else {
$if_types = null;
if ($source instanceof StatementsAnalyzer && $cache) {
$if_types = $source->node_data->getAssertions($base_conditional);
}
if ($if_types === null) {
$if_types = self::scrapeAssertions(
$base_conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional
);
if ($source instanceof StatementsAnalyzer && $cache) {
$source->node_data->setAssertions($base_conditional, $if_types);
}
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$false_type = Type::getFalse();
if (!UnionTypeComparator::isContainedBy(
$codebase,
$var_type,
$false_type
) && !UnionTypeComparator::isContainedBy(
$codebase,
$false_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
'Docblock-defined type ' . $var_type . ' can never contain false',
new CodeLocation($source, $conditional),
$var_type->getId() . ' false'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type . ' can never contain false',
new CodeLocation($source, $conditional),
$var_type->getId() . ' false'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types;
}
/**
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getTrueInequalityAssertions(
int $true_position,
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
bool $inside_negation,
bool $cache,
bool $inside_conditional
): array {
$if_types = [];
if ($true_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($true_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('Bad true variable position');
}
if ($base_conditional instanceof PhpParser\Node\Expr\FuncCall) {
$notif_types = self::processFunctionCall(
$base_conditional,
$this_class_name,
$source,
$codebase,
!$inside_negation
);
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$if_types[$var_name] = [['!true']];
} else {
$if_types[$var_name] = [['falsy']];
}
$notif_types = [];
} else {
$notif_types = null;
if ($source instanceof StatementsAnalyzer && $cache) {
$notif_types = $source->node_data->getAssertions($base_conditional);
}
if ($notif_types === null) {
$notif_types = self::scrapeAssertions(
$base_conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional
);
if ($source instanceof StatementsAnalyzer && $cache) {
$source->node_data->setAssertions($base_conditional, $notif_types);
}
}
}
}
if (count($notif_types) === 1) {
$notif_types = $notif_types[0];
if (count($notif_types) === 1) {
$if_types = \Psalm\Internal\Algebra::negateTypes($notif_types);
}
}
$if_types = $if_types ? [$if_types] : [];
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$true_type = Type::getTrue();
if (!UnionTypeComparator::isContainedBy(
$codebase,
$var_type,
$true_type
) && !UnionTypeComparator::isContainedBy(
$codebase,
$true_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
'Docblock-defined type ' . $var_type . ' can never contain true',
new CodeLocation($source, $conditional),
$var_type->getId() . ' true'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type . ' can never contain ' . $true_type,
new CodeLocation($source, $conditional),
$var_type->getId() . ' true'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types;
}
/**
* @param int $empty_array_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getEmptyInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
int $empty_array_position
): array {
$if_types = [];
if ($empty_array_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($empty_array_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('Bad empty array variable position');
}
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$if_types[$var_name] = [['non-empty-countable']];
} else {
$if_types[$var_name] = [['!falsy']];
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical) {
$empty_array_type = Type::getEmptyArray();
if (!UnionTypeComparator::isContainedBy(
$codebase,
$var_type,
$empty_array_type
) && !UnionTypeComparator::isContainedBy(
$codebase,
$empty_array_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new RedundantConditionGivenDocblockType(
'Docblock-defined type ' . $var_type->getId() . ' can never contain an empty array',
new CodeLocation($source, $conditional),
$var_type->getId() . ' empty-array'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type->getId() . ' can never contain an empty array',
new CodeLocation($source, $conditional),
$var_type->getId() . ' empty-array'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $gettype_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGettypeInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
int $gettype_position
): array {
$if_types = [];
if ($gettype_position === self::ASSIGNMENT_TO_RIGHT) {
$whichclass_expr = $conditional->left;
$gettype_expr = $conditional->right;
} elseif ($gettype_position === self::ASSIGNMENT_TO_LEFT) {
$whichclass_expr = $conditional->right;
$gettype_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$gettype_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $gettype_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$gettype_expr->args[0]->value,
$this_class_name,
$source
);
if ($whichclass_expr instanceof PhpParser\Node\Scalar\String_) {
$var_type = $whichclass_expr->value;
} elseif ($whichclass_expr instanceof PhpParser\Node\Expr\ClassConstFetch
&& $whichclass_expr->class instanceof PhpParser\Node\Name
) {
$var_type = ClassLikeAnalyzer::getFQCLNFromNameObject(
$whichclass_expr->class,
$source->getAliases()
);
} else {
throw new \UnexpectedValueException('Shouldn’t get here');
}
if (!isset(ClassLikeAnalyzer::GETTYPE_TYPES[$var_type])) {
if (IssueBuffer::accepts(
new UnevaluatedCode(
'gettype cannot return this value',
new CodeLocation($source, $whichclass_expr)
)
)) {
// fall through
}
} else {
if ($var_name && $var_type) {
$if_types[$var_name] = [['!' . $var_type]];
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $get_debug_type_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGetdebugTypeInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
int $get_debug_type_position
): array {
$if_types = [];
if ($get_debug_type_position === self::ASSIGNMENT_TO_RIGHT) {
$whichclass_expr = $conditional->left;
$get_debug_type_expr = $conditional->right;
} elseif ($get_debug_type_position === self::ASSIGNMENT_TO_LEFT) {
$whichclass_expr = $conditional->right;
$get_debug_type_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$get_debug_type_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $get_debug_type_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$get_debug_type_expr->args[0]->value,
$this_class_name,
$source
);
if ($whichclass_expr instanceof PhpParser\Node\Scalar\String_) {
$var_type = $whichclass_expr->value;
} elseif ($whichclass_expr instanceof PhpParser\Node\Expr\ClassConstFetch
&& $whichclass_expr->class instanceof PhpParser\Node\Name
) {
$var_type = ClassLikeAnalyzer::getFQCLNFromNameObject(
$whichclass_expr->class,
$source->getAliases()
);
} else {
throw new \UnexpectedValueException('Shouldn’t get here');
}
if ($var_name && $var_type) {
if ($var_type === 'class@anonymous') {
$if_types[$var_name] = [['!=object']];
} elseif ($var_type === 'resource (closed)') {
$if_types[$var_name] = [['!closed-resource']];
} elseif (substr($var_type, 0, 10) === 'resource (') {
$if_types[$var_name] = [['!=resource']];
} else {
$if_types[$var_name] = [['!' . $var_type]];
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param StatementsAnalyzer $source
* @param int $getclass_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGetclassInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
StatementsAnalyzer $source,
int $getclass_position
): array {
$if_types = [];
if ($getclass_position === self::ASSIGNMENT_TO_RIGHT) {
$whichclass_expr = $conditional->left;
$getclass_expr = $conditional->right;
} elseif ($getclass_position === self::ASSIGNMENT_TO_LEFT) {
$whichclass_expr = $conditional->right;
$getclass_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$getclass_position value');
}
if ($getclass_expr instanceof PhpParser\Node\Expr\FuncCall && isset($getclass_expr->args[0])) {
$var_name = ExpressionIdentifier::getArrayVarId(
$getclass_expr->args[0]->value,
$this_class_name,
$source
);
} else {
$var_name = '$this';
}
if ($whichclass_expr instanceof PhpParser\Node\Scalar\String_) {
$var_type = $whichclass_expr->value;
} elseif ($whichclass_expr instanceof PhpParser\Node\Expr\ClassConstFetch
&& $whichclass_expr->class instanceof PhpParser\Node\Name
) {
$var_type = ClassLikeAnalyzer::getFQCLNFromNameObject(
$whichclass_expr->class,
$source->getAliases()
);
if ($var_type === 'self' || $var_type === 'static') {
$var_type = $this_class_name;
} elseif ($var_type === 'parent') {
$var_type = null;
}
} else {
$type = $source->node_data->getType($whichclass_expr);
if ($type && $var_name) {
foreach ($type->getAtomicTypes() as $type_part) {
if ($type_part instanceof Type\Atomic\TTemplateParamClass) {
$if_types[$var_name] = [['!=' . $type_part->param_name]];
}
}
}
return $if_types ? [$if_types] : [];
}
if ($var_type
&& ClassLikeAnalyzer::checkFullyQualifiedClassLikeName(
$source,
$var_type,
new CodeLocation($source, $whichclass_expr),
null,
null,
$source->getSuppressedIssues(),
false
) === false
) {
// fall through
} else {
if ($var_name && $var_type) {
$if_types[$var_name] = [['!=getclass-' . $var_type]];
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param StatementsAnalyzer $source
* @param int $typed_value_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getTypedValueInequalityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
StatementsAnalyzer $source,
?Codebase $codebase,
int $typed_value_position
): array {
$if_types = [];
if ($typed_value_position === self::ASSIGNMENT_TO_RIGHT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->left,
$this_class_name,
$source
);
$other_type = $source->node_data->getType($conditional->left);
$var_type = $source->node_data->getType($conditional->right);
} elseif ($typed_value_position === self::ASSIGNMENT_TO_LEFT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->right,
$this_class_name,
$source
);
$var_type = $source->node_data->getType($conditional->left);
$other_type = $source->node_data->getType($conditional->right);
} else {
throw new \UnexpectedValueException('$typed_value_position value');
}
if ($var_type) {
if ($var_name) {
$not_identical = $conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical
|| ($other_type
&& (($var_type->isString() && $other_type->isString())
|| ($var_type->isInt() && $other_type->isInt())
|| ($var_type->isFloat() && $other_type->isFloat())
)
);
if ($not_identical) {
$if_types[$var_name] = [['!=' . $var_type->getAssertionString()]];
} else {
$if_types[$var_name] = [['!~' . $var_type->getAssertionString()]];
}
}
if ($codebase
&& $other_type
&& $conditional instanceof PhpParser\Node\Expr\BinaryOp\NotIdentical
) {
$parent_source = $source->getSource();
if ($parent_source->getSource() instanceof \Psalm\Internal\Analyzer\TraitAnalyzer
&& (($var_type->isSingleStringLiteral()
&& $var_type->getSingleStringLiteral()->value === $this_class_name)
|| ($other_type->isSingleStringLiteral()
&& $other_type->getSingleStringLiteral()->value === $this_class_name))
) {
// do nothing
} elseif (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$other_type,
$var_type
)) {
if ($var_type->from_docblock || $other_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' can never contain ' . $other_type->getId(),
new CodeLocation($source, $conditional),
$var_type . ' ' . $other_type
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new RedundantCondition(
$var_type->getId() . ' can never contain ' . $other_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $other_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $null_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getNullEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
int $null_position
): array {
$if_types = [];
if ($null_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($null_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('$null_position value');
}
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name && $base_conditional instanceof PhpParser\Node\Expr\Assign) {
$var_name = '=' . $var_name;
}
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical) {
$if_types[$var_name] = [['null']];
} else {
$if_types[$var_name] = [['falsy']];
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
&& $conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
) {
$null_type = Type::getNull();
if (!UnionTypeComparator::isContainedBy(
$codebase,
$var_type,
$null_type
) && !UnionTypeComparator::isContainedBy(
$codebase,
$null_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' does not contain null',
new CodeLocation($source, $conditional),
$var_type . ' null'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainNull(
$var_type . ' does not contain null',
new CodeLocation($source, $conditional),
$var_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getTrueEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
bool $inside_negation,
bool $cache,
int $true_position
): array {
$if_types = [];
if ($true_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($true_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('Unrecognised position');
}
if ($base_conditional instanceof PhpParser\Node\Expr\FuncCall) {
$if_types = self::processFunctionCall(
$base_conditional,
$this_class_name,
$source,
$codebase,
$inside_negation
);
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical) {
$if_types[$var_name] = [['true']];
} else {
$if_types[$var_name] = [['!falsy']];
}
$if_types = [$if_types];
} else {
$base_assertions = null;
if ($source instanceof StatementsAnalyzer && $cache) {
$base_assertions = $source->node_data->getAssertions($base_conditional);
}
if ($base_assertions === null) {
$base_assertions = self::scrapeAssertions(
$base_conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache
);
if ($source instanceof StatementsAnalyzer && $cache) {
$source->node_data->setAssertions($base_conditional, $base_assertions);
}
}
$if_types = $base_assertions;
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
&& $conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
) {
$config = $source->getCodebase()->config;
if ($config->strict_binary_operands
&& $var_type->isSingle()
&& $var_type->hasBool()
&& !$var_type->from_docblock
) {
if (IssueBuffer::accepts(
new RedundantIdentityWithTrue(
'The "=== true" part of this comparison is redundant',
new CodeLocation($source, $conditional)
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
$true_type = Type::getTrue();
if (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$true_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' does not contain true',
new CodeLocation($source, $conditional),
$var_type . ' true'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type . ' does not contain true',
new CodeLocation($source, $conditional),
$var_type . ' true'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
return $if_types;
}
/**
* @param int $false_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getFalseEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
bool $inside_negation,
bool $cache,
bool $inside_conditional,
int $false_position
): array {
$if_types = [];
if ($false_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($false_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('$false_position value');
}
if ($base_conditional instanceof PhpParser\Node\Expr\FuncCall) {
$notif_types = self::processFunctionCall(
$base_conditional,
$this_class_name,
$source,
$codebase,
!$inside_negation
);
} else {
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical) {
$if_types[$var_name] = [['false']];
} else {
$if_types[$var_name] = [['falsy']];
}
$notif_types = [];
} else {
$notif_types = null;
if ($source instanceof StatementsAnalyzer && $cache) {
$notif_types = $source->node_data->getAssertions($base_conditional);
}
if ($notif_types === null) {
$notif_types = self::scrapeAssertions(
$base_conditional,
$this_class_name,
$source,
$codebase,
$inside_negation,
$cache,
$inside_conditional
);
if ($source instanceof StatementsAnalyzer && $cache) {
$source->node_data->setAssertions($base_conditional, $notif_types);
}
}
}
}
if (count($notif_types) === 1) {
$notif_types = $notif_types[0];
if (count($notif_types) === 1) {
$if_types = \Psalm\Internal\Algebra::negateTypes($notif_types);
}
}
$if_types = $if_types ? [$if_types] : [];
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical) {
$false_type = Type::getFalse();
if (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$false_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' does not contain false',
new CodeLocation($source, $conditional),
$var_type . ' false'
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type . ' does not contain false',
new CodeLocation($source, $conditional),
$var_type . ' false'
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
}
return $if_types;
}
/**
* @param int $empty_array_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getEmptyArrayEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
?Codebase $codebase,
int $empty_array_position
): array {
$if_types = [];
if ($empty_array_position === self::ASSIGNMENT_TO_RIGHT) {
$base_conditional = $conditional->left;
} elseif ($empty_array_position === self::ASSIGNMENT_TO_LEFT) {
$base_conditional = $conditional->right;
} else {
throw new \UnexpectedValueException('$empty_array_position value');
}
$var_name = ExpressionIdentifier::getArrayVarId(
$base_conditional,
$this_class_name,
$source
);
if ($var_name) {
if ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical) {
$if_types[$var_name] = [['!non-empty-countable']];
} else {
$if_types[$var_name] = [['falsy']];
}
}
if ($codebase
&& $source instanceof StatementsAnalyzer
&& ($var_type = $source->node_data->getType($base_conditional))
&& $conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
) {
$empty_array_type = Type::getEmptyArray();
if (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$empty_array_type,
$var_type
)) {
if ($var_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type . ' does not contain an empty array',
new CodeLocation($source, $conditional),
null
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type . ' does not contain empty array',
new CodeLocation($source, $conditional),
null
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $gettype_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGettypeEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
int $gettype_position
): array {
$if_types = [];
if ($gettype_position === self::ASSIGNMENT_TO_RIGHT) {
$string_expr = $conditional->left;
$gettype_expr = $conditional->right;
} elseif ($gettype_position === self::ASSIGNMENT_TO_LEFT) {
$string_expr = $conditional->right;
$gettype_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$gettype_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $gettype_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$gettype_expr->args[0]->value,
$this_class_name,
$source
);
/** @var PhpParser\Node\Scalar\String_ $string_expr */
$var_type = $string_expr->value;
if (!isset(ClassLikeAnalyzer::GETTYPE_TYPES[$var_type])) {
if (IssueBuffer::accepts(
new UnevaluatedCode(
'gettype cannot return this value',
new CodeLocation($source, $string_expr)
)
)) {
// fall through
}
} else {
if ($var_name && $var_type) {
$if_types[$var_name] = [[$var_type]];
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param int $get_debug_type_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGetdebugtypeEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
FileSource $source,
int $get_debug_type_position
): array {
$if_types = [];
if ($get_debug_type_position === self::ASSIGNMENT_TO_RIGHT) {
$whichclass_expr = $conditional->left;
$get_debug_type_expr = $conditional->right;
} elseif ($get_debug_type_position === self::ASSIGNMENT_TO_LEFT) {
$whichclass_expr = $conditional->right;
$get_debug_type_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$get_debug_type_position value');
}
/** @var PhpParser\Node\Expr\FuncCall $get_debug_type_expr */
$var_name = ExpressionIdentifier::getArrayVarId(
$get_debug_type_expr->args[0]->value,
$this_class_name,
$source
);
if ($whichclass_expr instanceof PhpParser\Node\Scalar\String_) {
$var_type = $whichclass_expr->value;
} elseif ($whichclass_expr instanceof PhpParser\Node\Expr\ClassConstFetch
&& $whichclass_expr->class instanceof PhpParser\Node\Name
) {
$var_type = ClassLikeAnalyzer::getFQCLNFromNameObject(
$whichclass_expr->class,
$source->getAliases()
);
} else {
throw new \UnexpectedValueException('Shouldn’t get here');
}
if ($var_name && $var_type) {
if ($var_type === 'class@anonymous') {
$if_types[$var_name] = [['=object']];
} elseif ($var_type === 'resource (closed)') {
$if_types[$var_name] = [['closed-resource']];
} elseif (substr($var_type, 0, 10) === 'resource (') {
$if_types[$var_name] = [['=resource']];
} else {
$if_types[$var_name] = [[$var_type]];
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param StatementsAnalyzer $source
* @param int $getclass_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getGetclassEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
StatementsAnalyzer $source,
int $getclass_position
): array {
$if_types = [];
if ($getclass_position === self::ASSIGNMENT_TO_RIGHT) {
$whichclass_expr = $conditional->left;
$getclass_expr = $conditional->right;
} elseif ($getclass_position === self::ASSIGNMENT_TO_LEFT) {
$whichclass_expr = $conditional->right;
$getclass_expr = $conditional->left;
} else {
throw new \UnexpectedValueException('$getclass_position value');
}
if ($getclass_expr instanceof PhpParser\Node\Expr\FuncCall && isset($getclass_expr->args[0])) {
$var_name = ExpressionIdentifier::getArrayVarId(
$getclass_expr->args[0]->value,
$this_class_name,
$source
);
} else {
$var_name = '$this';
}
if ($whichclass_expr instanceof PhpParser\Node\Expr\ClassConstFetch
&& $whichclass_expr->class instanceof PhpParser\Node\Name
) {
$var_type = ClassLikeAnalyzer::getFQCLNFromNameObject(
$whichclass_expr->class,
$source->getAliases()
);
if ($var_type === 'self' || $var_type === 'static') {
$var_type = $this_class_name;
} elseif ($var_type === 'parent') {
$var_type = null;
}
if ($var_type) {
if (ClassLikeAnalyzer::checkFullyQualifiedClassLikeName(
$source,
$var_type,
new CodeLocation($source, $whichclass_expr),
null,
null,
$source->getSuppressedIssues(),
true
) === false
) {
return [];
}
}
if ($var_name && $var_type) {
$if_types[$var_name] = [['=getclass-' . $var_type]];
}
} else {
$type = $source->node_data->getType($whichclass_expr);
if ($type && $var_name) {
foreach ($type->getAtomicTypes() as $type_part) {
if ($type_part instanceof Type\Atomic\TTemplateParamClass) {
$if_types[$var_name] = [['=' . $type_part->param_name]];
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param StatementsAnalyzer $source
* @param int $typed_value_position
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getTypedValueEqualityAssertions(
PhpParser\Node\Expr\BinaryOp $conditional,
?string $this_class_name,
StatementsAnalyzer $source,
?Codebase $codebase,
int $typed_value_position
): array {
$if_types = [];
if ($typed_value_position === self::ASSIGNMENT_TO_RIGHT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->left,
$this_class_name,
$source
);
$other_type = $source->node_data->getType($conditional->left);
$var_type = $source->node_data->getType($conditional->right);
} elseif ($typed_value_position === self::ASSIGNMENT_TO_LEFT) {
$var_name = ExpressionIdentifier::getArrayVarId(
$conditional->right,
$this_class_name,
$source
);
$var_type = $source->node_data->getType($conditional->left);
$other_type = $source->node_data->getType($conditional->right);
} else {
throw new \UnexpectedValueException('$typed_value_position value');
}
if ($var_name && $var_type) {
$identical = $conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| ($other_type
&& (($var_type->isString(true) && $other_type->isString(true))
|| ($var_type->isInt(true) && $other_type->isInt(true))
|| ($var_type->isFloat() && $other_type->isFloat())
)
);
if ($identical) {
$if_types[$var_name] = [['=' . $var_type->getAssertionString()]];
} else {
$if_types[$var_name] = [['~' . $var_type->getAssertionString()]];
}
}
if ($codebase
&& $other_type
&& $var_type
&& ($conditional instanceof PhpParser\Node\Expr\BinaryOp\Identical
|| ($other_type->isString()
&& $var_type->isString())
)
) {
$parent_source = $source->getSource();
if ($parent_source->getSource() instanceof \Psalm\Internal\Analyzer\TraitAnalyzer
&& (($var_type->isSingleStringLiteral()
&& $var_type->getSingleStringLiteral()->value === $this_class_name)
|| ($other_type->isSingleStringLiteral()
&& $other_type->getSingleStringLiteral()->value === $this_class_name))
) {
// do nothing
} elseif (!UnionTypeComparator::canExpressionTypesBeIdentical(
$codebase,
$other_type,
$var_type
)) {
if ($var_type->from_docblock || $other_type->from_docblock) {
if (IssueBuffer::accepts(
new DocblockTypeContradiction(
$var_type->getId() . ' does not contain ' . $other_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $other_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
} else {
if (IssueBuffer::accepts(
new TypeDoesNotContainType(
$var_type->getId() . ' cannot be identical to ' . $other_type->getId(),
new CodeLocation($source, $conditional),
$var_type->getId() . ' ' . $other_type->getId()
),
$source->getSuppressedIssues()
)) {
// fall through
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param PhpParser\Node\Expr\FuncCall $expr
* @param StatementsAnalyzer $source
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getIsaAssertions(
PhpParser\Node\Expr\FuncCall $expr,
StatementsAnalyzer $source,
?string $this_class_name,
?string $first_var_name
): array {
$if_types = [];
if ($expr->args[0]->value instanceof PhpParser\Node\Expr\ClassConstFetch
&& $expr->args[0]->value->name instanceof PhpParser\Node\Identifier
&& strtolower($expr->args[0]->value->name->name) === 'class'
&& $expr->args[0]->value->class instanceof PhpParser\Node\Name
&& count($expr->args[0]->value->class->parts) === 1
&& strtolower($expr->args[0]->value->class->parts[0]) === 'static'
) {
$first_var_name = '$this';
}
if ($first_var_name) {
$first_arg = $expr->args[0]->value;
$second_arg = $expr->args[1]->value;
$third_arg = $expr->args[2]->value ?? null;
if ($third_arg instanceof PhpParser\Node\Expr\ConstFetch) {
if (!in_array(strtolower($third_arg->name->parts[0]), ['true', 'false'], true)) {
return [];
}
$third_arg_value = strtolower($third_arg->name->parts[0]);
} else {
$third_arg_value = $expr->name instanceof PhpParser\Node\Name
&& strtolower($expr->name->parts[0]) === 'is_subclass_of'
? 'true'
: 'false';
}
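// When strings are allowed as the subject (third argument true), the
// asserted value may itself be a class-string, hence a distinct prefix.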
$is_a_prefix = $third_arg_value === 'true' ? 'isa-string-' : 'isa-';
if (($first_arg_type = $source->node_data->getType($first_arg))
&& $first_arg_type->isSingleStringLiteral()
&& $source->getSource()->getSource() instanceof \Psalm\Internal\Analyzer\TraitAnalyzer
&& $first_arg_type->getSingleStringLiteral()->value === $this_class_name
) {
// do nothing
} else {
if ($second_arg instanceof PhpParser\Node\Scalar\String_) {
$fq_class_name = $second_arg->value;
if ($fq_class_name[0] === '\\') {
$fq_class_name = substr($fq_class_name, 1);
}
$if_types[$first_var_name] = [[$is_a_prefix . $fq_class_name]];
} elseif ($second_arg instanceof PhpParser\Node\Expr\ClassConstFetch
&& $second_arg->class instanceof PhpParser\Node\Name
&& $second_arg->name instanceof PhpParser\Node\Identifier
&& strtolower($second_arg->name->name) === 'class'
) {
$class_node = $second_arg->class;
if ($class_node->parts === ['static']) {
if ($this_class_name) {
$if_types[$first_var_name] = [[$is_a_prefix . $this_class_name . '&static']];
}
} elseif ($class_node->parts === ['self']) {
if ($this_class_name) {
$if_types[$first_var_name] = [[$is_a_prefix . $this_class_name]];
}
} elseif ($class_node->parts === ['parent']) {
// do nothing
} else {
$if_types[$first_var_name] = [[
$is_a_prefix
. ClassLikeAnalyzer::getFQCLNFromNameObject(
$class_node,
$source->getAliases()
)
]];
}
} elseif (($second_arg_type = $source->node_data->getType($second_arg))
&& $second_arg_type->hasString()
) {
$vals = [];
foreach ($second_arg_type->getAtomicTypes() as $second_arg_atomic_type) {
if ($second_arg_atomic_type instanceof Type\Atomic\TTemplateParamClass) {
$vals[] = [$is_a_prefix . $second_arg_atomic_type->param_name];
}
}
if ($vals) {
$if_types[$first_var_name] = $vals;
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param PhpParser\Node\Expr\FuncCall $expr
* @param StatementsAnalyzer $source
* @param string|null $first_var_name
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getInarrayAssertions(
PhpParser\Node\Expr\FuncCall $expr,
StatementsAnalyzer $source,
?string $first_var_name
): array {
$if_types = [];
if ($first_var_name
&& ($second_arg_type = $source->node_data->getType($expr->args[1]->value))
&& isset($expr->args[0]->value)
&& !$expr->args[0]->value instanceof PhpParser\Node\Expr\ClassConstFetch
) {
foreach ($second_arg_type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof Type\Atomic\TArray
|| $atomic_type instanceof Type\Atomic\TKeyedArray
|| $atomic_type instanceof Type\Atomic\TList
) {
if ($atomic_type instanceof Type\Atomic\TList) {
$value_type = $atomic_type->type_param;
} elseif ($atomic_type instanceof Type\Atomic\TKeyedArray) {
$value_type = $atomic_type->getGenericValueType();
} else {
$value_type = $atomic_type->type_params[1];
}
$array_literal_types = array_merge(
$value_type->getLiteralStrings(),
$value_type->getLiteralInts(),
$value_type->getLiteralFloats()
);
if ($array_literal_types
&& count($value_type->getAtomicTypes())
) {
$literal_assertions = [];
foreach ($array_literal_types as $array_literal_type) {
$literal_assertions[] = '=' . $array_literal_type->getId();
}
if ($value_type->isFalsable()) {
$literal_assertions[] = 'false';
}
if ($value_type->isNullable()) {
$literal_assertions[] = 'null';
}
$if_types[$first_var_name] = [$literal_assertions];
}
}
}
}
return $if_types ? [$if_types] : [];
}
/**
* @param PhpParser\Node\Expr\FuncCall $expr
* @param Type\Union|null $first_var_type
* @param string|null $first_var_name
* @return list<non-empty-array<string, non-empty-list<non-empty-list<string>>>>
*/
private static function getArrayKeyExistsAssertions(
PhpParser\Node\Expr\FuncCall $expr,
?Type\Union $first_var_type,
?string $first_var_name,
FileSource $source,
?string $this_class_name
): array {
$if_types = [];
$literal_assertions = [];
if (isset($expr->args[0])
&& isset($expr->args[1])
&& $first_var_type
&& $first_var_name
&& !$expr->args[0]->value instanceof PhpParser\Node\Expr\ClassConstFetch
&& $source instanceof StatementsAnalyzer
&& ($second_var_type = $source->node_data->getType($expr->args[1]->value))
) {
foreach ($second_var_type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof Type\Atomic\TArray
|| $atomic_type instanceof Type\Atomic\TKeyedArray
) {
if ($atomic_type instanceof Type\Atomic\TKeyedArray) {
$key_possibly_undefined = false;
foreach ($atomic_type->properties as $property_type) {
if ($property_type->possibly_undefined) {
$key_possibly_undefined = true;
break;
}
}
$key_type = $atomic_type->getGenericKeyType();
if ($key_possibly_undefined) {
$key_type->possibly_undefined = true;
}
} else {
$key_type = $atomic_type->type_params[0];
}
if ($key_type->allStringLiterals() && !$key_type->possibly_undefined) {
foreach ($key_type->getLiteralStrings() as $array_literal_type) {
$literal_assertions[] = '=' . $array_literal_type->getId();
}
} elseif ($key_type->allIntLiterals() && !$key_type->possibly_undefined) {
foreach ($key_type->getLiteralInts() as $array_literal_type) {
$literal_assertions[] = '=' . $array_literal_type->getId();
}
}
}
}
}
if ($literal_assertions && $first_var_name) {
$if_types[$first_var_name] = [$literal_assertions];
} else {
$array_root = isset($expr->args[1]->value)
? ExpressionIdentifier::getArrayVarId(
$expr->args[1]->value,
$this_class_name,
$source
)
: null;
if ($array_root) {
if ($first_var_name === null && isset($expr->args[0])) {
$first_arg = $expr->args[0];
if ($first_arg->value instanceof PhpParser\Node\Scalar\String_) {
$first_var_name = '\'' . $first_arg->value->value . '\'';
} elseif ($first_arg->value instanceof PhpParser\Node\Scalar\LNumber) {
$first_var_name = (string)$first_arg->value->value;
}
}
if ($expr->args[0]->value instanceof PhpParser\Node\Expr\ClassConstFetch
&& $expr->args[0]->value->name instanceof PhpParser\Node\Identifier
&& $expr->args[0]->value->name->name !== 'class'
) {
$const_type = null;
if ($source instanceof StatementsAnalyzer) {
$const_type = $source->node_data->getType($expr->args[0]->value);
}
if ($const_type) {
if ($const_type->isSingleStringLiteral()) {
$first_var_name = $const_type->getSingleStringLiteral()->value;
} elseif ($const_type->isSingleIntLiteral()) {
$first_var_name = (string)$const_type->getSingleIntLiteral()->value;
} else {
$first_var_name = null;
}
} else {
$first_var_name = null;
}
}
if ($first_var_name !== null
&& !strpos($first_var_name, '->')
&& !strpos($first_var_name, '[')
) {
$if_types[$array_root . '[' . $first_var_name . ']'] = [['array-key-exists']];
}
}
}
return $if_types ? [$if_types] : [];
}
}
| 1 | 9,648 | This feels hacky; is there a better way? | vimeo-psalm | php
@@ -22,9 +22,14 @@
# IN THE SOFTWARE.
#
from math import ceil
-from boto.compat import json, map, six
+from boto.compat import json, six
import requests
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
class SearchServiceException(Exception):
pass | 1 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from math import ceil
from boto.compat import json, map, six
import requests
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
pass
class SearchResults(object):
def __init__(self, **attrs):
self.rid = attrs['info']['rid']
# self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
self.cpu_time_ms = attrs['info']['cpu-time-ms']
self.time_ms = attrs['info']['time-ms']
self.hits = attrs['hits']['found']
self.docs = attrs['hits']['hit']
self.start = attrs['hits']['start']
self.rank = attrs['rank']
self.match_expression = attrs['match-expr']
self.query = attrs['query']
self.search_service = attrs['search_service']
self.facets = {}
if 'facets' in attrs:
for (facet, values) in attrs['facets'].items():
if 'constraints' in values:
self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))
self.num_pages_needed = ceil(self.hits / self.query.real_size)
def __len__(self):
return len(self.docs)
def __iter__(self):
return iter(self.docs)
def next_page(self):
"""Call Cloudsearch to get the next page of search results
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: the following page of search results
"""
if self.query.page <= self.num_pages_needed:
self.query.start += self.query.real_size
self.query.page += 1
return self.search_service(self.query)
else:
raise StopIteration
class Query(object):
RESULTS_PER_PAGE = 500
def __init__(self, q=None, bq=None, rank=None,
return_fields=None, size=10,
start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
self.q = q
self.bq = bq
self.rank = rank or []
self.return_fields = return_fields or []
self.start = start
self.facet = facet or []
self.facet_constraints = facet_constraints or {}
self.facet_sort = facet_sort or {}
self.facet_top_n = facet_top_n or {}
self.t = t or {}
self.page = 0
self.update_size(size)
def update_size(self, new_size):
self.size = new_size
self.real_size = Query.RESULTS_PER_PAGE if (self.size >
Query.RESULTS_PER_PAGE or self.size == 0) else self.size
def to_params(self):
"""Transform search parameters from instance properties to a dictionary
:rtype: dict
:return: search parameters
"""
params = {'start': self.start, 'size': self.real_size}
if self.q:
params['q'] = self.q
if self.bq:
params['bq'] = self.bq
if self.rank:
params['rank'] = ','.join(self.rank)
if self.return_fields:
params['return-fields'] = ','.join(self.return_fields)
if self.facet:
params['facet'] = ','.join(self.facet)
if self.facet_constraints:
for k, v in six.iteritems(self.facet_constraints):
params['facet-%s-constraints' % k] = v
if self.facet_sort:
for k, v in six.iteritems(self.facet_sort):
params['facet-%s-sort' % k] = v
if self.facet_top_n:
for k, v in six.iteritems(self.facet_top_n):
params['facet-%s-top-n' % k] = v
if self.t:
for k, v in six.iteritems(self.t):
params['t-%s' % k] = v
return params
class SearchConnection(object):
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not endpoint:
self.endpoint = domain.search_service_endpoint
def build_query(self, q=None, bq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
return Query(q=q, bq=bq, rank=rank, return_fields=return_fields,
size=size, start=start, facet=facet,
facet_constraints=facet_constraints,
facet_sort=facet_sort, facet_top_n=facet_top_n, t=t)
def search(self, q=None, bq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
"""
Send a query to CloudSearch
Each search query should use at least the q or bq argument to specify
the search parameter. The other options are used to specify the
criteria of the search.
:type q: string
:param q: A string to search the default search fields for.
:type bq: string
:param bq: A string to perform a Boolean search. This can be used to
create advanced searches.
:type rank: List of strings
:param rank: A list of fields or rank expressions used to order the
search results. A field can be reversed by using the - operator.
``['-year', 'author']``
:type return_fields: List of strings
:param return_fields: A list of fields which should be returned by the
search. If this field is not specified, only IDs will be returned.
``['headline']``
:type size: int
:param size: Number of search results to specify
:type start: int
:param start: Offset of the first search result to return (can be used
for paging)
:type facet: list
:param facet: List of fields for which facets should be returned
``['colour', 'size']``
:type facet_constraints: dict
:param facet_constraints: Use to limit facets to specific values
specified as comma-delimited strings in a Dictionary of facets
``{'colour': "'blue','white','red'", 'size': "big"}``
:type facet_sort: dict
:param facet_sort: Rules used to specify the order in which facet
values should be returned. Allowed values are *alpha*, *count*,
            *max*, *sum*. Use *alpha* to sort alphabetically, and *count* to sort
            the facet by the number of available results.
``{'color': 'alpha', 'size': 'count'}``
:type facet_top_n: dict
:param facet_top_n: Dictionary of facets and number of facets to
return.
``{'colour': 2}``
:type t: dict
:param t: Specify ranges for specific fields
``{'year': '2000..2005'}``
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: Returns the results of this search
The following examples all assume we have indexed a set of documents
with fields: *author*, *date*, *headline*
A simple search will look for documents whose default text search
fields will contain the search word exactly:
>>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)
A simple search with more keywords will return documents whose default
text search fields contain the search strings together or separately.
>>> search(q='Tim apple') # Will match "tim" and "apple"
More complex searches require the boolean search operator.
Wildcard searches can be used to search for any words that start with
the search string.
        >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy
Search terms can also be combined. Allowed operators are "and", "or",
"not", "field", "optional", "token", "phrase", or "filter"
>>> search(bq="(and 'Tim' (field author 'John Smith'))")
Facets allow you to show classification information about the search
results. For example, you can retrieve the authors who have written
about Tim:
>>> search(q='Tim', facet=['Author'])
With facet_constraints, facet_top_n and facet_sort more complicated
constraints can be specified such as returning the top author out of
John Smith and Mark Smith who have a document with the word Tim in it.
        >>> search(q='Tim',
        ...     facet=['author'],
        ...     facet_constraints={'author': "'John Smith','Mark Smith'"},
        ...     facet_top_n={'author': 1},
        ...     facet_sort={'author': 'count'})
"""
query = self.build_query(q=q, bq=bq, rank=rank,
return_fields=return_fields,
size=size, start=start, facet=facet,
facet_constraints=facet_constraints,
facet_sort=facet_sort,
facet_top_n=facet_top_n, t=t)
return self(query)
def __call__(self, query):
"""Make a call to CloudSearch
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: search results
"""
url = "http://%s/2011-02-01/search" % (self.endpoint)
params = query.to_params()
r = requests.get(url, params=params)
body = r.content.decode('utf-8')
try:
data = json.loads(body)
except ValueError as e:
if r.status_code == 403:
msg = ''
import re
g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', body)
try:
msg = ': %s' % (g.groups()[0].strip())
except AttributeError:
pass
raise SearchServiceException('Authentication error from Amazon%s' % msg)
raise SearchServiceException("Got non-json response from Amazon. %s" % body, query)
if 'messages' in data and 'error' in data:
for m in data['messages']:
if m['severity'] == 'fatal':
raise SearchServiceException("Error processing search %s "
"=> %s" % (params, m['message']), query)
elif 'error' in data:
raise SearchServiceException("Unknown error processing search %s"
% json.dumps(data), query)
data['query'] = query
data['search_service'] = self
return SearchResults(**data)
def get_all_paged(self, query, per_page):
"""Get a generator to iterate over all pages of search results
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:type per_page: int
:param per_page: Number of docs in each :class:`boto.cloudsearch.search.SearchResults` object.
:rtype: generator
:return: Generator containing :class:`boto.cloudsearch.search.SearchResults`
"""
query.update_size(per_page)
page = 0
num_pages_needed = 0
while page <= num_pages_needed:
results = self(query)
num_pages_needed = results.num_pages_needed
yield results
query.start += query.real_size
page += 1
def get_all_hits(self, query):
"""Get a generator to iterate over all search results
Transparently handles the results paging from Cloudsearch
search results so even if you have many thousands of results
you can iterate over all results in a reasonably efficient
manner.
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:rtype: generator
:return: All docs matching query
"""
page = 0
num_pages_needed = 0
while page <= num_pages_needed:
results = self(query)
num_pages_needed = results.num_pages_needed
for doc in results:
yield doc
query.start += query.real_size
page += 1
def get_num_hits(self, query):
"""Return the total number of hits for query
:type query: :class:`boto.cloudsearch.search.Query`
:param query: a group of search criteria
:rtype: int
:return: Total number of hits for query
"""
query.update_size(1)
return self(query).hits
| 1 | 11,229 | Can we move this logic into the `boto.compat` module? Then it's just `from boto.compat import OrderedDict` instead. Also, this is introducing a new dependency. What about users on 2.6.x that don't have the OrderedDict module installed? We may need to fall back to an ordinary `dict` so that existing code in the wild does not break. | boto-boto | py |
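A minimal sketch of the reviewer's suggestion, assuming boto/compat.py is the right home; the three-step fallback (stdlib, backport package, plain dict) is inferred from the comment and is not actual boto code:

# Hypothetical addition to boto/compat.py -- a sketch only
try:
    from collections import OrderedDict        # Python 2.7+ / 3.x
except ImportError:
    try:
        from ordereddict import OrderedDict    # optional backport for 2.6.x
    except ImportError:
        OrderedDict = dict                     # degrade gracefully: lose ordering, keep working

# Call sites then shrink to:
#     from boto.compat import OrderedDict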
@@ -36,14 +36,14 @@ namespace Fixtures.Azure.AcceptanceTestsAzureBodyDuration
public Uri BaseUri { get; set; }
/// <summary>
- /// Gets or sets json serialization settings.
+ /// Gets JSON serialization settings.
/// </summary>
public JsonSerializerSettings SerializationSettings { get; private set; }
/// <summary>
- /// Gets or sets json deserialization settings.
+ /// Gets JSON deserialization settings.
/// </summary>
- public JsonSerializerSettings DeserializationSettings { get; private set; }
+ public JsonSerializerSettings DeserializationSettings { get; private set; }
/// <summary>
/// Gets Azure subscription credentials. | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Fixtures.Azure.AcceptanceTestsAzureBodyDuration
{
using System;
using System.Linq;
using System.Collections.Generic;
using System.Diagnostics;
using System.Net;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Rest;
using Microsoft.Rest.Serialization;
using Newtonsoft.Json;
using Microsoft.Rest.Azure;
using Models;
/// <summary>
/// Test Infrastructure for AutoRest
/// </summary>
public partial class AutoRestDurationTestService : ServiceClient<AutoRestDurationTestService>, IAutoRestDurationTestService, IAzureClient
{
/// <summary>
/// The base URI of the service.
/// </summary>
public Uri BaseUri { get; set; }
/// <summary>
/// Gets or sets json serialization settings.
/// </summary>
public JsonSerializerSettings SerializationSettings { get; private set; }
/// <summary>
/// Gets or sets json deserialization settings.
/// </summary>
public JsonSerializerSettings DeserializationSettings { get; private set; }
/// <summary>
/// Gets Azure subscription credentials.
/// </summary>
public ServiceClientCredentials Credentials { get; private set; }
/// <summary>
/// Gets or sets the preferred language for the response.
/// </summary>
public string AcceptLanguage { get; set; }
/// <summary>
/// Gets or sets the retry timeout in seconds for Long Running Operations.
/// Default value is 30.
/// </summary>
public int? LongRunningOperationRetryTimeout { get; set; }
/// <summary>
/// When set to true a unique x-ms-client-request-id value is generated and
/// included in each request. Default is true.
/// </summary>
public bool? GenerateClientRequestId { get; set; }
/// <summary>
/// Gets the IDurationOperations.
/// </summary>
public virtual IDurationOperations Duration { get; private set; }
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
protected AutoRestDurationTestService(params DelegatingHandler[] handlers) : base(handlers)
{
this.Initialize();
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='rootHandler'>
/// Optional. The http client handler used to handle http transport.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
protected AutoRestDurationTestService(HttpClientHandler rootHandler, params DelegatingHandler[] handlers) : base(rootHandler, handlers)
{
this.Initialize();
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='baseUri'>
/// Optional. The base URI of the service.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
protected AutoRestDurationTestService(Uri baseUri, params DelegatingHandler[] handlers) : this(handlers)
{
if (baseUri == null)
{
throw new ArgumentNullException("baseUri");
}
this.BaseUri = baseUri;
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='baseUri'>
/// Optional. The base URI of the service.
/// </param>
/// <param name='rootHandler'>
/// Optional. The http client handler used to handle http transport.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
protected AutoRestDurationTestService(Uri baseUri, HttpClientHandler rootHandler, params DelegatingHandler[] handlers) : this(rootHandler, handlers)
{
if (baseUri == null)
{
throw new ArgumentNullException("baseUri");
}
this.BaseUri = baseUri;
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='credentials'>
/// Required. Gets Azure subscription credentials.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
public AutoRestDurationTestService(ServiceClientCredentials credentials, params DelegatingHandler[] handlers) : this(handlers)
{
if (credentials == null)
{
throw new ArgumentNullException("credentials");
}
this.Credentials = credentials;
if (this.Credentials != null)
{
this.Credentials.InitializeServiceClient(this);
}
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='credentials'>
/// Required. Gets Azure subscription credentials.
/// </param>
/// <param name='rootHandler'>
/// Optional. The http client handler used to handle http transport.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
public AutoRestDurationTestService(ServiceClientCredentials credentials, HttpClientHandler rootHandler, params DelegatingHandler[] handlers) : this(rootHandler, handlers)
{
if (credentials == null)
{
throw new ArgumentNullException("credentials");
}
this.Credentials = credentials;
if (this.Credentials != null)
{
this.Credentials.InitializeServiceClient(this);
}
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='baseUri'>
/// Optional. The base URI of the service.
/// </param>
/// <param name='credentials'>
/// Required. Gets Azure subscription credentials.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
public AutoRestDurationTestService(Uri baseUri, ServiceClientCredentials credentials, params DelegatingHandler[] handlers) : this(handlers)
{
if (baseUri == null)
{
throw new ArgumentNullException("baseUri");
}
if (credentials == null)
{
throw new ArgumentNullException("credentials");
}
this.BaseUri = baseUri;
this.Credentials = credentials;
if (this.Credentials != null)
{
this.Credentials.InitializeServiceClient(this);
}
}
/// <summary>
/// Initializes a new instance of the AutoRestDurationTestService class.
/// </summary>
/// <param name='baseUri'>
/// Optional. The base URI of the service.
/// </param>
/// <param name='credentials'>
/// Required. Gets Azure subscription credentials.
/// </param>
/// <param name='rootHandler'>
/// Optional. The http client handler used to handle http transport.
/// </param>
/// <param name='handlers'>
/// Optional. The delegating handlers to add to the http client pipeline.
/// </param>
public AutoRestDurationTestService(Uri baseUri, ServiceClientCredentials credentials, HttpClientHandler rootHandler, params DelegatingHandler[] handlers) : this(rootHandler, handlers)
{
if (baseUri == null)
{
throw new ArgumentNullException("baseUri");
}
if (credentials == null)
{
throw new ArgumentNullException("credentials");
}
this.BaseUri = baseUri;
this.Credentials = credentials;
if (this.Credentials != null)
{
this.Credentials.InitializeServiceClient(this);
}
}
/// <summary>
/// Initializes client properties.
/// </summary>
private void Initialize()
{
this.Duration = new DurationOperations(this);
this.BaseUri = new Uri("https://localhost");
this.AcceptLanguage = "en-US";
this.LongRunningOperationRetryTimeout = 30;
this.GenerateClientRequestId = true;
SerializationSettings = new JsonSerializerSettings
{
Formatting = Formatting.Indented,
DateFormatHandling = DateFormatHandling.IsoDateFormat,
DateTimeZoneHandling = DateTimeZoneHandling.Utc,
NullValueHandling = NullValueHandling.Ignore,
ReferenceLoopHandling = ReferenceLoopHandling.Serialize,
ContractResolver = new ReadOnlyJsonContractResolver(),
Converters = new List<JsonConverter>
{
new Iso8601TimeSpanConverter()
}
};
DeserializationSettings = new JsonSerializerSettings
{
DateFormatHandling = DateFormatHandling.IsoDateFormat,
DateTimeZoneHandling = DateTimeZoneHandling.Utc,
NullValueHandling = NullValueHandling.Ignore,
ReferenceLoopHandling = ReferenceLoopHandling.Serialize,
ContractResolver = new ReadOnlyJsonContractResolver(),
Converters = new List<JsonConverter>
{
new Iso8601TimeSpanConverter()
}
};
DeserializationSettings.Converters.Add(new CloudErrorJsonConverter());
}
}
}
| 1 | 21,868 | "Gets the JSON serialization settings." would be better? | Azure-autorest | java |
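For concreteness, a sketch of the doc comment with the reviewer's wording applied; only the summary text changes, the property is as generated:

        /// <summary>
        /// Gets the JSON serialization settings.
        /// </summary>
        public JsonSerializerSettings SerializationSettings { get; private set; }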
@@ -38,6 +38,15 @@ func (s *server) setupRouting() {
handle(router, "/bytes", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.bytesUploadHandler),
})
+
+ handle(router, "/file", jsonhttp.MethodHandler{
+ "POST": http.HandlerFunc(s.bzzFileUploadHandler),
+ })
+
+ handle(router, "/file/{addr}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.bzzFileDownloadHandler),
+ })
+
handle(router, "/bytes/{address}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.bytesGetHandler),
}) | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"fmt"
"net/http"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/logging"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"resenje.org/web"
)
func (s *server) setupRouting() {
apiVersion := "v1" // only one api version exists, this should be configurable with more
handle := func(router *mux.Router, path string, handler http.Handler) {
router.Handle(path, handler)
router.Handle("/"+apiVersion+path, handler)
}
router := mux.NewRouter()
router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Ethereum Swarm Bee")
})
router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *\nDisallow: /")
})
handle(router, "/bytes", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.bytesUploadHandler),
})
handle(router, "/bytes/{address}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.bytesGetHandler),
})
handle(router, "/chunks/{addr}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.chunkGetHandler),
"POST": http.HandlerFunc(s.chunkUploadHandler),
})
router.Handle("/bzz-tag/name/{name}", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.CreateTag),
})
router.Handle("/bzz-tag/uuid/{uuid}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getTagInfoUsingUUid),
})
s.Handler = web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "api access"),
handlers.CompressHandler,
// todo: add recovery handler
s.pageviewMetricsHandler,
web.FinalHandler(router),
)
}
| 1 | 10,496 | The endpoint should be plural: `/files`. | ethersphere-bee | go
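A sketch of the same registrations with the plural path the reviewer asks for; the handler names are reused from the patch above, and whether the singular /file route is kept as an alias is left open:

	handle(router, "/files", jsonhttp.MethodHandler{
		"POST": http.HandlerFunc(s.bzzFileUploadHandler),
	})

	handle(router, "/files/{addr}", jsonhttp.MethodHandler{
		"GET": http.HandlerFunc(s.bzzFileDownloadHandler),
	})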
@@ -141,6 +141,9 @@ AtomPDBResidueInfo *AtomGetPDBResidueInfo(Atom *atom) {
return (AtomPDBResidueInfo *)res;
}
+struct MDLDummy {};
+struct DaylightDummy {};
+
// FIX: is there any reason at all to not just prevent the construction of
// Atoms?
std::string atomClassDoc = | 1 | // $Id$
//
// Copyright (C) 2003-2013 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define NO_IMPORT_ARRAY
#include <RDBoost/python.h>
#include <string>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/QueryAtom.h>
#include <GraphMol/MonomerInfo.h>
#include <RDGeneral/types.h>
#include <Geometry/point.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/SmilesParse/SmartsWrite.h>
#include <RDBoost/Wrap.h>
#include "seqs.hpp"
#include "props.hpp"
#include <algorithm>
namespace python = boost::python;
namespace RDKit {
namespace {
std::string qhelper(Atom::QUERYATOM_QUERY *q, unsigned int depth) {
std::string res = "";
if (q) {
for (unsigned int i = 0; i < depth; ++i) res += " ";
res += q->getFullDescription() + "\n";
for (Atom::QUERYATOM_QUERY::CHILD_VECT_CI ci = q->beginChildren();
ci != q->endChildren(); ++ci) {
res += qhelper((*ci).get(), depth + 1);
}
}
return res;
}
} // end of local namespace
std::string describeQuery(const Atom *atom) {
std::string res = "";
if (atom->hasQuery()) {
res = qhelper(atom->getQuery(), 0);
}
return res;
}
void expandQuery(QueryAtom *self, const QueryAtom *other,
Queries::CompositeQueryType how, bool maintainOrder) {
if (other->hasQuery()) {
const QueryAtom::QUERYATOM_QUERY *qry = other->getQuery();
self->expandQuery(qry->copy(), how, maintainOrder);
}
}
template <class T>
void AtomSetProp(const Atom *atom, const char *key, const T &val) {
// std::cerr<<"asp: "<<atom<<" " << key<<" - " << val << std::endl;
atom->setProp<T>(key, val);
}
int AtomHasProp(const Atom *atom, const char *key) {
// std::cerr<<"ahp: "<<atom<<" " << key<< std::endl;
int res = atom->hasProp(key);
return res;
}
void AtomClearProp(const Atom *atom, const char *key) {
if (!atom->hasProp(key)) {
return;
}
atom->clearProp(key);
}
python::tuple AtomGetNeighbors(Atom *atom) {
python::list res;
const ROMol *parent = &atom->getOwningMol();
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = parent->getAtomNeighbors(atom);
while (begin != end) {
res.append(python::ptr(parent->getAtomWithIdx(*begin)));
begin++;
}
return python::tuple(res);
}
python::tuple AtomGetBonds(Atom *atom) {
python::list res;
const ROMol *parent = &atom->getOwningMol();
ROMol::OEDGE_ITER begin, end;
boost::tie(begin, end) = parent->getAtomBonds(atom);
while (begin != end) {
Bond *tmpB = (*parent)[*begin].get();
res.append(python::ptr(tmpB));
begin++;
}
return python::tuple(res);
}
bool AtomIsInRing(const Atom *atom) {
if (!atom->getOwningMol().getRingInfo()->isInitialized()) {
MolOps::findSSSR(atom->getOwningMol());
}
return atom->getOwningMol().getRingInfo()->numAtomRings(atom->getIdx()) != 0;
}
bool AtomIsInRingSize(const Atom *atom, int size) {
if (!atom->getOwningMol().getRingInfo()->isInitialized()) {
MolOps::findSSSR(atom->getOwningMol());
}
return atom->getOwningMol().getRingInfo()->isAtomInRingOfSize(atom->getIdx(),
size);
}
std::string AtomGetSmarts(const Atom *atom) {
std::string res;
if (atom->hasQuery()) {
res = SmartsWrite::GetAtomSmarts(static_cast<const QueryAtom *>(atom));
} else {
res = SmilesWrite::GetAtomSmiles(atom);
}
return res;
}
void SetAtomMonomerInfo(Atom *atom, const AtomMonomerInfo *info) {
atom->setMonomerInfo(info->copy());
}
AtomMonomerInfo *AtomGetMonomerInfo(Atom *atom) {
return atom->getMonomerInfo();
}
AtomPDBResidueInfo *AtomGetPDBResidueInfo(Atom *atom) {
AtomMonomerInfo *res = atom->getMonomerInfo();
if (!res) return NULL;
if (res->getMonomerType() != AtomMonomerInfo::PDBRESIDUE) {
throw_value_error("MonomerInfo is not a PDB Residue");
}
return (AtomPDBResidueInfo *)res;
}
// FIX: is there any reason at all to not just prevent the construction of
// Atoms?
std::string atomClassDoc =
"The class to store Atoms.\n\
Note that, though it is possible to create one, having an Atom on its own\n\
(i.e not associated with a molecule) is not particularly useful.\n";
struct atom_wrapper {
static void wrap() {
python::class_<Atom>("Atom", atomClassDoc.c_str(),
python::init<std::string>())
.def(python::init<unsigned int>(
"Constructor, takes either an int (atomic number) or a string "
"(atomic symbol).\n"))
.def("GetAtomicNum", &Atom::getAtomicNum, "Returns the atomic number.")
.def("SetAtomicNum", &Atom::setAtomicNum,
"Sets the atomic number, takes an integer value as an argument")
.def("GetSymbol", &Atom::getSymbol,
"Returns the atomic symbol (a string)\n")
.def("GetIdx", &Atom::getIdx,
"Returns the atom's index (ordering in the molecule)\n")
.def("GetDegree", &Atom::getDegree,
"Returns the degree of the atom in the molecule.\n\n"
" The degree of an atom is defined to be its number of\n"
" directly-bonded neighbors.\n"
" The degree is independent of bond orders, but is dependent\n"
" on whether or not Hs are explicit in the graph.\n")
.def("GetTotalDegree", &Atom::getTotalDegree,
"Returns the degree of the atom in the molecule including Hs.\n\n"
" The degree of an atom is defined to be its number of\n"
" directly-bonded neighbors.\n"
" The degree is independent of bond orders.\n")
.def("GetTotalNumHs", &Atom::getTotalNumHs,
(python::arg("self"), python::arg("includeNeighbors") = false),
"Returns the total number of Hs (explicit and implicit) on the "
"atom.\n\n"
" ARGUMENTS:\n\n"
" - includeNeighbors: (optional) toggles inclusion of "
"neighboring H atoms in the sum.\n"
" Defaults to 0.\n")
.def("GetNumImplicitHs", &Atom::getNumImplicitHs,
"Returns the total number of implicit Hs on the atom.\n")
.def("GetExplicitValence", &Atom::getExplicitValence,
"Returns the number of explicit Hs on the atom.\n")
.def("GetImplicitValence", &Atom::getImplicitValence,
"Returns the number of implicit Hs on the atom.\n")
.def("GetTotalValence", &Atom::getTotalValence,
"Returns the total valence (explicit + implicit) of the atom.\n\n")
.def("GetFormalCharge", &Atom::getFormalCharge)
.def("SetFormalCharge", &Atom::setFormalCharge)
.def("SetNoImplicit", &Atom::setNoImplicit,
"Sets a marker on the atom that *disallows* implicit Hs.\n"
" This holds even if the atom would otherwise have implicit Hs "
"added.\n")
.def("GetNoImplicit", &Atom::getNoImplicit,
"Returns whether or not the atom is *allowed* to have implicit "
"Hs.\n")
.def("SetNumExplicitHs", &Atom::setNumExplicitHs)
.def("GetNumExplicitHs", &Atom::getNumExplicitHs)
.def("SetIsAromatic", &Atom::setIsAromatic)
.def("GetIsAromatic", &Atom::getIsAromatic)
.def("GetMass", &Atom::getMass)
.def("SetIsotope", &Atom::setIsotope)
.def("GetIsotope", &Atom::getIsotope)
.def("SetNumRadicalElectrons", &Atom::setNumRadicalElectrons)
.def("GetNumRadicalElectrons", &Atom::getNumRadicalElectrons)
.def("SetChiralTag", &Atom::setChiralTag)
.def("InvertChirality", &Atom::invertChirality)
.def("GetChiralTag", &Atom::getChiralTag)
.def("SetHybridization", &Atom::setHybridization,
"Sets the hybridization of the atom.\n"
" The argument should be a HybridizationType\n")
.def("GetHybridization", &Atom::getHybridization,
"Returns the atom's hybridization.\n")
.def("GetOwningMol", &Atom::getOwningMol,
"Returns the Mol that owns this atom.\n",
python::return_value_policy<python::reference_existing_object>())
.def("GetNeighbors", AtomGetNeighbors,
"Returns a read-only sequence of the atom's neighbors\n")
.def("GetBonds", AtomGetBonds,
"Returns a read-only sequence of the atom's bonds\n")
.def("Match", (bool (Atom::*)(const Atom *) const) & Atom::Match,
"Returns whether or not this atom matches another Atom.\n\n"
" Each Atom (or query Atom) has a query function which is\n"
" used for this type of matching.\n\n"
" ARGUMENTS:\n"
" - other: the other Atom to which to compare\n")
.def("IsInRingSize", AtomIsInRingSize,
"Returns whether or not the atom is in a ring of a particular "
"size.\n\n"
" ARGUMENTS:\n"
" - size: the ring size to look for\n")
.def("IsInRing", AtomIsInRing,
"Returns whether or not the atom is in a ring\n\n")
.def("HasQuery", &Atom::hasQuery,
"Returns whether or not the atom has an associated query\n\n")
.def("DescribeQuery", describeQuery,
"returns a text description of the query. Primarily intended for "
"debugging purposes.\n\n")
.def("GetSmarts", AtomGetSmarts,
"returns the SMARTS (or SMILES) string for an Atom\n\n")
// properties
.def("SetProp", AtomSetProp<std::string>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a string).\n"
" - value: the property value (a string).\n\n")
.def("GetProp", GetProp<Atom, std::string>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a string).\n\n"
" RETURNS: a string\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetIntProp", AtomSetProp<int>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a int).\n"
" - value: the property value (a int).\n\n")
.def("SetUnsignedProp", AtomSetProp<unsigned>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (an unsigned "
"integer).\n"
" - value: the property value (a int >= 0).\n\n")
.def("GetIntProp", GetProp<Atom, int>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (an int).\n\n"
" RETURNS: an int\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("GetUnsignedProp", GetProp<Atom, unsigned>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (an unsigned "
"integer).\n\n"
" RETURNS: an integer (Python has no unsigned type)\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetDoubleProp", AtomSetProp<double>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a double).\n"
" - value: the property value (a double).\n\n")
.def("GetDoubleProp", GetProp<Atom, double>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a double).\n\n"
" RETURNS: a double\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetBoolProp", AtomSetProp<bool>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a bool).\n"
" - value: the property value (a bool).\n\n")
.def("GetBoolProp", GetProp<Atom, bool>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a bool).\n\n"
" RETURNS: a bool\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("HasProp", AtomHasProp,
"Queries a Atom to see if a particular property has been "
"assigned.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to check for (a string).\n")
.def("ClearProp", AtomClearProp,
"Removes a particular property from an Atom (does nothing if not "
"already set).\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be removed.\n")
.def("GetPropNames", &Atom::getPropList,
(python::arg("self"), python::arg("includePrivate") = false,
python::arg("includeComputed") = false),
"Returns a list of the properties set on the Atom.\n\n")
.def("GetPropsAsDict", GetPropsAsDict<Atom>,
(python::arg("self"), python::arg("includePrivate") = true,
python::arg("includeComputed") = true),
"Returns a dictionary of the properties set on the Atom.\n"
" n.b. some properties cannot be converted to python types.\n")
.def("UpdatePropertyCache", &Atom::updatePropertyCache,
(python::arg("self"), python::arg("strict") = true),
"Regenerates computed properties like implicit valence and ring "
"information.\n\n")
.def("NeedsUpdatePropertyCache", &Atom::needsUpdatePropertyCache,
(python::arg("self")),
"Returns true or false depending on whether implicit and explicit "
"valence of the molecule have already been calculated.\n\n")
.def("GetMonomerInfo", AtomGetMonomerInfo,
python::return_internal_reference<
1, python::with_custodian_and_ward_postcall<0, 1> >(),
"Returns the atom's MonomerInfo object, if there is one.\n\n")
.def("GetPDBResidueInfo", AtomGetPDBResidueInfo,
python::return_internal_reference<
1, python::with_custodian_and_ward_postcall<0, 1> >(),
"Returns the atom's MonomerInfo object, if there is one.\n\n")
.def("SetMonomerInfo", SetAtomMonomerInfo,
"Sets the atom's MonomerInfo object.\n\n");
python::enum_<Atom::HybridizationType>("HybridizationType")
.value("UNSPECIFIED", Atom::UNSPECIFIED)
.value("SP", Atom::SP)
.value("SP2", Atom::SP2)
.value("SP3", Atom::SP3)
.value("SP3D", Atom::SP3D)
.value("SP3D2", Atom::SP3D2)
.value("OTHER", Atom::OTHER);
python::enum_<Atom::ChiralType>("ChiralType")
.value("CHI_UNSPECIFIED", Atom::CHI_UNSPECIFIED)
.value("CHI_TETRAHEDRAL_CW", Atom::CHI_TETRAHEDRAL_CW)
.value("CHI_TETRAHEDRAL_CCW", Atom::CHI_TETRAHEDRAL_CCW)
.value("CHI_OTHER", Atom::CHI_OTHER)
.export_values();
;
python::enum_<Queries::CompositeQueryType>("CompositeQueryType")
.value("COMPOSITE_AND", Queries::COMPOSITE_AND)
.value("COMPOSITE_OR", Queries::COMPOSITE_OR)
.value("COMPOSITE_XOR", Queries::COMPOSITE_XOR)
.export_values();
;
atomClassDoc =
"The class to store QueryAtoms.\n\
These cannot currently be constructed directly from Python\n";
python::class_<QueryAtom, python::bases<Atom> >(
"QueryAtom", atomClassDoc.c_str(), python::no_init)
.def("ExpandQuery", expandQuery,
(python::arg("self"), python::arg("other"),
python::arg("how") = Queries::COMPOSITE_AND,
python::arg("maintainOrder") = true),
"combines the query from other with ours");
};
};
} // end of namespace
void wrap_atom() { RDKit::atom_wrapper::wrap(); }
| 1 | 15,299 | ??? Why do we need to have a different API in Python? Oh, wait, I see what you did. It's to fake the namespaces. Given that I don't think the namespaces are necessary, and that fewer of these should be exposed anyway, I think these should go. | rdkit-rdkit | cpp
@@ -40,7 +40,7 @@ func InitContext(contextType string) {
})
}
-func GetContext() gocontext.Context{
+func GetContext() gocontext.Context {
return context.ctx
}
func Done() <-chan struct{} { | 1 | package context
import (
gocontext "context"
"sync"
"time"
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core/model"
)
// define channel type
const (
MsgCtxTypeChannel = "channel"
)
var (
// singleton
context *beehiveContext
once sync.Once
)
// InitContext gets global context instance
func InitContext(contextType string) {
once.Do(func() {
ctx, cancel := gocontext.WithCancel(gocontext.Background())
context = &beehiveContext{
ctx: ctx,
cancel: cancel,
}
switch contextType {
case MsgCtxTypeChannel:
channelContext := NewChannelContext()
context.messageContext = channelContext
context.moduleContext = channelContext
default:
klog.Fatalf("Do not support context type:%s", contextType)
}
})
}
func GetContext() gocontext.Context{
return context.ctx
}
func Done() <-chan struct{} {
return context.ctx.Done()
}
// AddModule adds module into module context
func AddModule(module string) {
context.moduleContext.AddModule(module)
}
// AddModuleGroup adds module into module context group
func AddModuleGroup(module, group string) {
context.moduleContext.AddModuleGroup(module, group)
}
// Cancel function
func Cancel() {
context.cancel()
}
// Cleanup cleans up module
func Cleanup(module string) {
context.moduleContext.Cleanup(module)
}
// Send the message
func Send(module string, message model.Message) {
context.messageContext.Send(module, message)
}
// Receive the message
// module : local module name
func Receive(module string) (model.Message, error) {
message, err := context.messageContext.Receive(module)
if err == nil {
return message, nil
}
klog.Warningf("Receive: failed to receive message, error:%v", err)
return message, err
}
// SendSync sends message in sync mode
// module: the destination of the message
// timeout: if <= 0 using default value(30s)
func SendSync(module string,
message model.Message, timeout time.Duration) (model.Message, error) {
resp, err := context.messageContext.SendSync(module, message, timeout)
if err == nil {
return resp, nil
}
return model.Message{}, err
}
// SendResp sends response
// please get resp message using model.NewRespByMessage
func SendResp(resp model.Message) {
context.messageContext.SendResp(resp)
}
// SendToGroup broadcasts the message to all of group members
func SendToGroup(moduleType string, message model.Message) {
context.messageContext.SendToGroup(moduleType, message)
}
// sendToGroupSync broadcasts the message to all of group members in sync mode
func sendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
return context.messageContext.SendToGroupSync(moduleType, message, timeout)
}
| 1 | 16,401 | I think this line is a gofmt issue? @daixiang0 | kubeedge-kubeedge | go
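If it is just a formatting slip, the standard Go toolchain flags and fixes it; a quick check (run from the package directory, which is an assumption about the layout):

gofmt -l .    # list files whose formatting differs from gofmt's output
gofmt -w .    # rewrite those files in place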
@@ -94,6 +94,16 @@ func TestSubrepoLabel(t *testing.T) {
assert.EqualValues(t, BuildLabel{PackageName: "", Name: ""}, label.SubrepoLabel())
}
+func TestParseBuildLabelParts(t *testing.T) {
+ target1 := "@unittest_cpp//:unittest_cpp"
+ targetNewSyntax := "@unittest_cpp"
+ pkg, name, subrepo := parseBuildLabelParts(target1, "/", nil)
+ pkg2, name2, subrepo2 := parseBuildLabelParts(targetNewSyntax, "/", nil)
+ assert.Equal(t, pkg, pkg2)
+ assert.Equal(t, name, name2)
+ assert.Equal(t, subrepo, subrepo2)
+}
+
func TestMain(m *testing.M) {
// Used to support TestComplete, the function it's testing re-execs
// itself thinking that it's actually plz. | 1 | package core
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestIncludes(t *testing.T) {
label1 := BuildLabel{PackageName: "src/core", Name: "..."}
label2 := BuildLabel{PackageName: "src/parse", Name: "parse"}
assert.False(t, label1.Includes(label2))
label2 = BuildLabel{PackageName: "src/core", Name: "core_test"}
assert.True(t, label1.Includes(label2))
}
func TestIncludesSubstring(t *testing.T) {
label1 := BuildLabel{PackageName: "third_party/python", Name: "..."}
label2 := BuildLabel{PackageName: "third_party/python3", Name: "six"}
assert.False(t, label1.Includes(label2))
}
func TestIncludesSubpackages(t *testing.T) {
label1 := BuildLabel{PackageName: "", Name: "..."}
label2 := BuildLabel{PackageName: "third_party/python3", Name: "six"}
assert.True(t, label1.Includes(label2))
}
func TestParent(t *testing.T) {
label := BuildLabel{PackageName: "src/core", Name: "core"}
assert.Equal(t, label, label.Parent())
label2 := BuildLabel{PackageName: "src/core", Name: "_core#src"}
assert.Equal(t, label, label2.Parent())
label3 := BuildLabel{PackageName: "src/core", Name: "_core_something"}
assert.Equal(t, label3, label3.Parent())
}
func TestUnmarshalFlag(t *testing.T) {
var label BuildLabel
assert.NoError(t, label.UnmarshalFlag("//src/core:core"))
assert.Equal(t, label.PackageName, "src/core")
assert.Equal(t, label.Name, "core")
// N.B. we can't test a failure here because it does a log.Fatalf
}
func TestUnmarshalText(t *testing.T) {
var label BuildLabel
assert.NoError(t, label.UnmarshalText([]byte("//src/core:core")))
assert.Equal(t, label.PackageName, "src/core")
assert.Equal(t, label.Name, "core")
assert.Error(t, label.UnmarshalText([]byte(":blahblah:")))
}
func TestPackageDir(t *testing.T) {
label := NewBuildLabel("src/core", "core")
assert.Equal(t, "src/core", label.PackageDir())
label = NewBuildLabel("", "core")
assert.Equal(t, ".", label.PackageDir())
}
func TestLooksLikeABuildLabel(t *testing.T) {
assert.True(t, LooksLikeABuildLabel("//src/core"))
assert.True(t, LooksLikeABuildLabel(":core"))
assert.True(t, LooksLikeABuildLabel("@test_x86:core"))
assert.False(t, LooksLikeABuildLabel("core"))
assert.False(t, LooksLikeABuildLabel("@test_x86"))
}
func TestComplete(t *testing.T) {
label := BuildLabel{}
completions := label.Complete("//src/c")
assert.Equal(t, 4, len(completions))
assert.Equal(t, "//src/cache", completions[0].Item)
assert.Equal(t, "//src/clean", completions[1].Item)
assert.Equal(t, "//src/cli", completions[2].Item)
assert.Equal(t, "//src/core", completions[3].Item)
}
func TestCompleteError(t *testing.T) {
label := BuildLabel{}
completions := label.Complete("nope")
assert.Equal(t, 0, len(completions))
}
func TestSubrepoLabel(t *testing.T) {
label := BuildLabel{Subrepo: "test"}
assert.EqualValues(t, BuildLabel{PackageName: "", Name: "test"}, label.SubrepoLabel())
label.Subrepo = "package/test"
assert.EqualValues(t, BuildLabel{PackageName: "package", Name: "test"}, label.SubrepoLabel())
// This isn't really valid (the caller shouldn't need to call it in such a case)
// but we want to make sure it doesn't panic.
label.Subrepo = ""
assert.EqualValues(t, BuildLabel{PackageName: "", Name: ""}, label.SubrepoLabel())
}
func TestMain(m *testing.M) {
// Used to support TestComplete, the function it's testing re-execs
// itself thinking that it's actually plz.
if complete := os.Getenv("PLZ_COMPLETE"); complete == "//src/c" {
os.Stdout.Write([]byte("//src/cache\n"))
os.Stdout.Write([]byte("//src/clean\n"))
os.Stdout.Write([]byte("//src/cli\n"))
os.Stdout.Write([]byte("//src/core\n"))
os.Exit(0)
} else if complete != "" {
os.Stderr.Write([]byte("Invalid completion\n"))
os.Exit(1)
}
os.Exit(m.Run())
}
| 1 | 8,305 | probably better to assert the values directly; technically you could pass this test with an implementation that always returned "" for the subrepo, for example. | thought-machine-please | go
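One way to assert the parts directly, per the comment; the expected strings are assumptions about what parseBuildLabelParts yields for this label and would need checking against the real parser:

func TestParseBuildLabelParts(t *testing.T) {
	for _, target := range []string{"@unittest_cpp//:unittest_cpp", "@unittest_cpp"} {
		pkg, name, subrepo := parseBuildLabelParts(target, "/", nil)
		assert.Equal(t, "", pkg, target)
		assert.Equal(t, "unittest_cpp", name, target)
		assert.Equal(t, "unittest_cpp", subrepo, target)
	}
}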
@@ -46,7 +46,7 @@ func addTestingTsfBlocks(bc Blockchain) error {
big.NewInt(10),
)
pubk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey)
- sig, _ := hex.DecodeString("3584fe777dd090e1a7a825896f532485ea2cc35d7c300c6c56f0e2e78b51c6ded33f7d0069f4f6f6b6762306466fcff6f261bb30d9e1550f2f8be4f988e740903fd734209cb60101")
+ sig, _ := hex.DecodeString("9c1fb14affb398f850d0642f22f12433526bed742fbfb39115f9df2549b2751347bafe9ddbe50e6f02906bdc83b7c905944adc19583726dfaea83245318132ff01")
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tsf0).
SetDestinationAddress(ta.Addrinfo["producer"].Bech32()). | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/state/factory"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
const (
testDBPath = "db.test"
testTriePath = "trie.test"
)
func addTestingTsfBlocks(bc Blockchain) error {
// Add block 0
tsf0, _ := action.NewTransfer(
1,
big.NewInt(3000000000),
Gen.CreatorAddr(),
ta.Addrinfo["producer"].Bech32(),
[]byte{}, uint64(100000),
big.NewInt(10),
)
pubk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey)
sig, _ := hex.DecodeString("3584fe777dd090e1a7a825896f532485ea2cc35d7c300c6c56f0e2e78b51c6ded33f7d0069f4f6f6b6762306466fcff6f261bb30d9e1550f2f8be4f988e740903fd734209cb60101")
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tsf0).
SetDestinationAddress(ta.Addrinfo["producer"].Bech32()).
SetNonce(1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp := action.AssembleSealedEnvelope(elp, Gen.CreatorAddr(), pubk, sig)
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[selp.SrcAddr()] = []action.SealedEnvelope{selp}
blk, err := bc.MintNewBlock(
actionMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk, true); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
addr0 := ta.Addrinfo["producer"].Bech32()
priKey0 := ta.Keyinfo["producer"].PriKey
addr1 := ta.Addrinfo["alfa"].Bech32()
priKey1 := ta.Keyinfo["alfa"].PriKey
addr2 := ta.Addrinfo["bravo"].Bech32()
addr3 := ta.Addrinfo["charlie"].Bech32()
priKey3 := ta.Keyinfo["charlie"].PriKey
addr4 := ta.Addrinfo["delta"].Bech32()
priKey4 := ta.Keyinfo["delta"].PriKey
addr5 := ta.Addrinfo["echo"].Bech32()
priKey5 := ta.Keyinfo["echo"].PriKey
addr6 := ta.Addrinfo["foxtrot"].Bech32()
// Add block 1
// test --> A, B, C, D, E, F
tsf1, err := testutil.SignedTransfer(addr0, addr1, priKey0, 1, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf2, err := testutil.SignedTransfer(addr0, addr2, priKey0, 2, big.NewInt(30), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf3, err := testutil.SignedTransfer(addr0, addr3, priKey0, 3, big.NewInt(50), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf4, err := testutil.SignedTransfer(addr0, addr4, priKey0, 4, big.NewInt(70), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf5, err := testutil.SignedTransfer(addr0, addr5, priKey0, 5, big.NewInt(110), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf6, err := testutil.SignedTransfer(addr0, addr6, priKey0, 6, big.NewInt(50<<20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
accMap := make(map[string][]action.SealedEnvelope)
accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
blk, err = bc.MintNewBlock(
accMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk, true); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 2
// Charlie --> A, B, D, E, test
tsf1, err = testutil.SignedTransfer(addr3, addr1, priKey3, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr3, addr2, priKey3, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr3, addr4, priKey3, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr3, addr5, priKey3, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr3, addr0, priKey3, 5, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5}
blk, err = bc.MintNewBlock(
accMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk, true); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 3
// Delta --> B, E, F, test
tsf1, err = testutil.SignedTransfer(addr4, addr2, priKey4, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr4, addr5, priKey4, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr4, addr6, priKey4, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr4, addr0, priKey4, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4}
blk, err = bc.MintNewBlock(
accMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk, true); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 4
// Echo --> A, B, C, D, F, test
tsf1, err = testutil.SignedTransfer(addr5, addr1, priKey5, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr5, addr2, priKey5, 2, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr5, addr3, priKey5, 3, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr5, addr4, priKey5, 4, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr5, addr6, priKey5, 5, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
tsf6, err = testutil.SignedTransfer(addr5, addr0, priKey5, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
vote1, err := testutil.SignedVote(addr3, addr3, priKey3, 6, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
vote2, err := testutil.SignedVote(addr1, addr1, priKey1, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
accMap[vote1.SrcAddr()] = []action.SealedEnvelope{vote1}
accMap[vote2.SrcAddr()] = []action.SealedEnvelope{vote2}
blk, err = bc.MintNewBlock(
accMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk, true); err != nil {
return err
}
if blk.TxRoot() != blk.CalculateTxRoot() {
return err
}
return bc.CommitBlock(blk)
}
func TestCreateBlockchain(t *testing.T) {
require := require.New(t)
ctx := context.Background()
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
// create chain
bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
bc.GetFactory().AddActionHandlers(account.NewProtocol(), vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
require.NotNil(bc)
height := bc.TipHeight()
require.Equal(0, int(height))
fmt.Printf("Create blockchain pass, height = %d\n", height)
defer func() {
err := bc.Stop(ctx)
require.NoError(err)
}()
// verify Genesis block
genesis, _ := bc.GetBlockByHeight(0)
require.NotNil(genesis)
// serialize
data, err := genesis.Serialize()
require.Nil(err)
transfers, votes, _ := action.ClassifyActions(genesis.Actions)
require.Equal(0, len(transfers))
require.Equal(21, len(votes))
fmt.Printf("Block size match pass\n")
fmt.Printf("Marshaling Block pass\n")
// deserialize
deserialize := block.Block{}
err = deserialize.Deserialize(data)
require.Nil(err)
fmt.Printf("Unmarshaling Block pass\n")
blkhash := genesis.HashBlock()
require.Equal(blkhash, deserialize.HashBlock())
fmt.Printf("Serialize/Deserialize Block hash = %x match\n", blkhash)
blkhash = genesis.CalculateTxRoot()
require.Equal(blkhash, deserialize.CalculateTxRoot())
fmt.Printf("Serialize/Deserialize Block merkle = %x match\n", blkhash)
// add 4 sample blocks
require.Nil(addTestingTsfBlocks(bc))
height = bc.TipHeight()
require.Equal(5, int(height))
}
func TestBlockchain_MintNewBlock(t *testing.T) {
ctx := context.Background()
cfg := config.Default
bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
bc.GetFactory().AddActionHandlers(account.NewProtocol(), vote.NewProtocol(bc))
require.NoError(t, bc.Start(ctx))
defer require.NoError(t, bc.Stop(ctx))
pk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey)
// The signature should only match the transfer amount 3000000000
sig, err := hex.DecodeString("a270828edf414f652564495ffdb3ed4799ae949cd8f0b8c108f36aff5e2ef0ee03c81f0161ef59c64a4590517d7cecabcec1b43b170fd70b1187f95a57975135951d867fd8c14901")
require.NoError(t, err)
cases := make(map[int64]bool)
cases[0] = true
cases[1] = false
for k, v := range cases {
tsf, err := action.NewTransfer(
1,
big.NewInt(3000000000+k),
Gen.CreatorAddr(),
ta.Addrinfo["producer"].Bech32(),
[]byte{}, uint64(100000),
big.NewInt(10),
)
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tsf).
SetDestinationAddress(ta.Addrinfo["producer"].Bech32()).
SetNonce(1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp := action.AssembleSealedEnvelope(elp, Gen.CreatorAddr(), pk, sig)
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[selp.SrcAddr()] = []action.SealedEnvelope{selp}
_, err = bc.MintNewBlock(
actionMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
if v {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
type MockSubscriber struct {
counter int
mu sync.RWMutex
}
func (ms *MockSubscriber) HandleBlock(blk *block.Block) error {
ms.mu.Lock()
tsfs, _, _ := action.ClassifyActions(blk.Actions)
ms.counter += len(tsfs)
ms.mu.Unlock()
return nil
}
func (ms *MockSubscriber) Counter() int {
ms.mu.RLock()
defer ms.mu.RUnlock()
return ms.counter
}
func TestLoadBlockchainfromDB(t *testing.T) {
require := require.New(t)
ctx := context.Background()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.EnableIndex = true
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.Nil(err)
sf.AddActionHandlers(account.NewProtocol())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
sf.AddActionHandlers(vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
ms := &MockSubscriber{counter: 0}
err = bc.AddSubscriber(ms)
require.Nil(err)
require.Equal(0, ms.Counter())
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
require.Nil(addTestingTsfBlocks(bc))
err = bc.Stop(ctx)
require.NoError(err)
require.Equal(27, ms.Counter())
// Load a blockchain from DB
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.Nil(err)
sf.AddActionHandlers(account.NewProtocol())
bc = NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
defer func() {
require.NoError(bc.Stop(ctx))
}()
// check hash<-->height mapping
blkhash, err := bc.GetHashByHeight(0)
require.Nil(err)
height, err = bc.GetHeightByHash(blkhash)
require.Nil(err)
require.Equal(uint64(0), height)
blk, err := bc.GetBlockByHash(blkhash)
require.Nil(err)
require.Equal(blkhash, blk.HashBlock())
fmt.Printf("Genesis hash = %x\n", blkhash)
hash1, err := bc.GetHashByHeight(1)
require.Nil(err)
height, err = bc.GetHeightByHash(hash1)
require.Nil(err)
require.Equal(uint64(1), height)
blk, err = bc.GetBlockByHash(hash1)
require.Nil(err)
require.Equal(hash1, blk.HashBlock())
fmt.Printf("block 1 hash = %x\n", hash1)
hash2, err := bc.GetHashByHeight(2)
require.Nil(err)
height, err = bc.GetHeightByHash(hash2)
require.Nil(err)
require.Equal(uint64(2), height)
blk, err = bc.GetBlockByHash(hash2)
require.Nil(err)
require.Equal(hash2, blk.HashBlock())
fmt.Printf("block 2 hash = %x\n", hash2)
hash3, err := bc.GetHashByHeight(3)
require.Nil(err)
height, err = bc.GetHeightByHash(hash3)
require.Nil(err)
require.Equal(uint64(3), height)
blk, err = bc.GetBlockByHash(hash3)
require.Nil(err)
require.Equal(hash3, blk.HashBlock())
fmt.Printf("block 3 hash = %x\n", hash3)
hash4, err := bc.GetHashByHeight(4)
require.Nil(err)
height, err = bc.GetHeightByHash(hash4)
require.Nil(err)
require.Equal(uint64(4), height)
blk, err = bc.GetBlockByHash(hash4)
require.Nil(err)
require.Equal(hash4, blk.HashBlock())
fmt.Printf("block 4 hash = %x\n", hash4)
hash5, err := bc.GetHashByHeight(5)
require.Nil(err)
height, err = bc.GetHeightByHash(hash5)
require.Nil(err)
require.Equal(uint64(5), height)
blk, err = bc.GetBlockByHash(hash5)
require.Nil(err)
require.Equal(hash5, blk.HashBlock())
fmt.Printf("block 5 hash = %x\n", hash5)
empblk, err := bc.GetBlockByHash(hash.ZeroHash32B)
require.Nil(empblk)
require.NotNil(err.Error())
blk, err = bc.GetBlockByHeight(60000)
require.Nil(blk)
require.NotNil(err)
// add wrong blocks
h := bc.TipHeight()
blkhash = bc.TipHash()
blk, err = bc.GetBlockByHeight(h)
require.Nil(err)
require.Equal(blkhash, blk.HashBlock())
fmt.Printf("Current tip = %d hash = %x\n", h, blkhash)
// add block with wrong height
cbTsf := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32())
require.NotNil(cbTsf)
bd := action.EnvelopeBuilder{}
elp := bd.SetNonce(1).
SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()).
SetGasLimit(genesis.ActionGasLimit).
SetAction(cbTsf).Build()
selp, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
nblk, err := block.NewTestingBuilder().
SetChainID(0).
SetHeight(h+2).
SetPrevBlockHash(blkhash).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk, true)
require.NotNil(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
// add block with zero prev hash
cbTsf2 := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32())
require.NotNil(cbTsf2)
bd = action.EnvelopeBuilder{}
elp = bd.SetNonce(1).
SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()).
SetGasLimit(genesis.ActionGasLimit).
SetAction(cbTsf2).Build()
selp2, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
nblk, err = block.NewTestingBuilder().
SetChainID(0).
SetHeight(h+1).
SetPrevBlockHash(hash.ZeroHash32B).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk, true)
require.NotNil(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
// adding an existing block again will have no effect
blk, err = bc.GetBlockByHeight(3)
require.NotNil(blk)
require.Nil(err)
require.NoError(bc.(*blockchain).commitBlock(blk))
fmt.Printf("Cannot add block 3 again: %v\n", err)
// check all Tx from block 5
blk, err = bc.GetBlockByHeight(5)
require.Nil(err)
require.Equal(hash5, blk.HashBlock())
tsfs, votes, _ := action.ClassifyActions(blk.Actions)
for _, transfer := range tsfs {
transferHash := transfer.Hash()
blkhash, err := bc.GetBlockHashByTransferHash(transferHash)
require.Nil(err)
require.Equal(blkhash, hash5)
transfer1, err := bc.GetTransferByTransferHash(transferHash)
require.Nil(err)
require.Equal(transfer1.Hash(), transferHash)
}
for _, vote := range votes {
voteHash := vote.Hash()
blkhash, err := bc.GetBlockHashByVoteHash(voteHash)
require.Nil(err)
require.Equal(blkhash, hash5)
vote1, err := bc.GetVoteByVoteHash(voteHash)
require.Nil(err)
require.Equal(vote1.Hash(), voteHash)
}
fromTransfers, err := bc.GetTransfersFromAddress(ta.Addrinfo["charlie"].Bech32())
require.Nil(err)
require.Equal(len(fromTransfers), 5)
toTransfers, err := bc.GetTransfersToAddress(ta.Addrinfo["charlie"].Bech32())
require.Nil(err)
require.Equal(len(toTransfers), 2)
fromVotes, err := bc.GetVotesFromAddress(ta.Addrinfo["charlie"].Bech32())
require.Nil(err)
require.Equal(len(fromVotes), 1)
fromVotes, err = bc.GetVotesFromAddress(ta.Addrinfo["alfa"].Bech32())
require.Nil(err)
require.Equal(len(fromVotes), 1)
toVotes, err := bc.GetVotesToAddress(ta.Addrinfo["charlie"].Bech32())
require.Nil(err)
require.Equal(len(toVotes), 1)
toVotes, err = bc.GetVotesToAddress(ta.Addrinfo["alfa"].Bech32())
require.Nil(err)
require.Equal(len(toVotes), 1)
totalTransfers, err := bc.GetTotalTransfers()
require.Nil(err)
require.Equal(totalTransfers, uint64(27))
totalVotes, err := bc.GetTotalVotes()
require.Nil(err)
require.Equal(totalVotes, uint64(23))
_, err = bc.GetTransferByTransferHash(hash.ZeroHash32B)
require.NotNil(err)
_, err = bc.GetVoteByVoteHash(hash.ZeroHash32B)
require.NotNil(err)
_, err = bc.StateByAddr("")
require.NotNil(err)
}
func TestLoadBlockchainfromDBWithoutExplorer(t *testing.T) {
require := require.New(t)
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
ctx := context.Background()
cfg := config.Default
cfg.DB.UseBadgerDB = false // test with boltDB
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.Nil(err)
sf.AddActionHandlers(account.NewProtocol())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
sf.AddActionHandlers(vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
ms := &MockSubscriber{counter: 0}
err = bc.AddSubscriber(ms)
require.Nil(err)
require.Equal(0, ms.counter)
err = bc.RemoveSubscriber(ms)
require.Nil(err)
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
require.Nil(addTestingTsfBlocks(bc))
err = bc.Stop(ctx)
require.NoError(err)
require.Equal(0, ms.counter)
// Load a blockchain from DB
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.Nil(err)
sf.AddActionHandlers(account.NewProtocol())
bc = NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
defer func() {
err := bc.Stop(ctx)
require.NoError(err)
}()
require.NotNil(bc)
// check hash<-->height mapping
blkhash, err := bc.GetHashByHeight(0)
require.Nil(err)
height, err = bc.GetHeightByHash(blkhash)
require.Nil(err)
require.Equal(uint64(0), height)
blk, err := bc.GetBlockByHash(blkhash)
require.Nil(err)
require.Equal(blkhash, blk.HashBlock())
fmt.Printf("Genesis hash = %x\n", blkhash)
hash1, err := bc.GetHashByHeight(1)
require.Nil(err)
height, err = bc.GetHeightByHash(hash1)
require.Nil(err)
require.Equal(uint64(1), height)
blk, err = bc.GetBlockByHash(hash1)
require.Nil(err)
require.Equal(hash1, blk.HashBlock())
fmt.Printf("block 1 hash = %x\n", hash1)
hash2, err := bc.GetHashByHeight(2)
require.Nil(err)
height, err = bc.GetHeightByHash(hash2)
require.Nil(err)
require.Equal(uint64(2), height)
blk, err = bc.GetBlockByHash(hash2)
require.Nil(err)
require.Equal(hash2, blk.HashBlock())
fmt.Printf("block 2 hash = %x\n", hash2)
hash3, err := bc.GetHashByHeight(3)
require.Nil(err)
height, err = bc.GetHeightByHash(hash3)
require.Nil(err)
require.Equal(uint64(3), height)
blk, err = bc.GetBlockByHash(hash3)
require.Nil(err)
require.Equal(hash3, blk.HashBlock())
fmt.Printf("block 3 hash = %x\n", hash3)
hash4, err := bc.GetHashByHeight(4)
require.Nil(err)
height, err = bc.GetHeightByHash(hash4)
require.Nil(err)
require.Equal(uint64(4), height)
blk, err = bc.GetBlockByHash(hash4)
require.Nil(err)
require.Equal(hash4, blk.HashBlock())
fmt.Printf("block 4 hash = %x\n", hash4)
empblk, err := bc.GetBlockByHash(hash.ZeroHash32B)
require.Nil(empblk)
require.NotNil(err.Error())
blk, err = bc.GetBlockByHeight(60000)
require.Nil(blk)
require.NotNil(err)
// add wrong blocks
h := bc.TipHeight()
blkhash = bc.TipHash()
blk, err = bc.GetBlockByHeight(h)
require.Nil(err)
require.Equal(blkhash, blk.HashBlock())
fmt.Printf("Current tip = %d hash = %x\n", h, blkhash)
// add block with wrong height
cbTsf := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32())
require.NotNil(cbTsf)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(1).
SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()).
SetGasLimit(genesis.ActionGasLimit).
SetAction(cbTsf).Build()
selp, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
nblk, err := block.NewTestingBuilder().
SetChainID(0).
SetHeight(h+2).
SetPrevBlockHash(blkhash).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk, true)
require.NotNil(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
// add block with zero prev hash
cbTsf2 := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32())
require.NotNil(cbTsf2)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(1).
SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()).
SetGasLimit(genesis.ActionGasLimit).
SetAction(cbTsf2).Build()
selp2, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
nblk, err = block.NewTestingBuilder().
SetChainID(0).
SetHeight(h+1).
SetPrevBlockHash(hash.ZeroHash32B).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk, true)
require.NotNil(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
// adding an existing block again will have no effect
blk, err = bc.GetBlockByHeight(3)
require.NotNil(blk)
require.Nil(err)
require.NoError(bc.(*blockchain).commitBlock(blk))
fmt.Printf("Cannot add block 3 again: %v\n", err)
// check all Tx from block 4
blk, err = bc.GetBlockByHeight(4)
require.Nil(err)
require.Equal(hash4, blk.HashBlock())
tsfs, votes, _ := action.ClassifyActions(blk.Actions)
for _, transfer := range tsfs {
transferHash := transfer.Hash()
_, err := bc.GetBlockHashByTransferHash(transferHash)
require.NotNil(err)
_, err = bc.GetTransferByTransferHash(transferHash)
require.NotNil(err)
}
for _, vote := range votes {
voteHash := vote.Hash()
_, err := bc.GetBlockHashByVoteHash(voteHash)
require.NotNil(err)
_, err = bc.GetVoteByVoteHash(voteHash)
require.NotNil(err)
}
_, err = bc.GetTransfersFromAddress(ta.Addrinfo["charlie"].Bech32())
require.NotNil(err)
_, err = bc.GetTransfersToAddress(ta.Addrinfo["charlie"].Bech32())
require.NotNil(err)
_, err = bc.GetVotesFromAddress(ta.Addrinfo["charlie"].Bech32())
require.NotNil(err)
_, err = bc.GetVotesFromAddress(ta.Addrinfo["alfa"].Bech32())
require.NotNil(err)
_, err = bc.GetVotesToAddress(ta.Addrinfo["charlie"].Bech32())
require.NotNil(err)
_, err = bc.GetVotesToAddress(ta.Addrinfo["alfa"].Bech32())
require.NotNil(err)
_, err = bc.GetTotalTransfers()
require.NotNil(err)
_, err = bc.GetTotalVotes()
require.NotNil(err)
_, err = bc.GetTransferByTransferHash(hash.ZeroHash32B)
require.NotNil(err)
_, err = bc.GetVoteByVoteHash(hash.ZeroHash32B)
require.NotNil(err)
_, err = bc.StateByAddr("")
require.NotNil(err)
}
func TestBlockchain_Validator(t *testing.T) {
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
ctx := context.Background()
bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(t, bc.Start(ctx))
defer func() {
err := bc.Stop(ctx)
require.Nil(t, err)
}()
require.NotNil(t, bc)
val := bc.Validator()
require.NotNil(t, bc)
bc.SetValidator(val)
require.NotNil(t, bc.Validator())
}
func TestBlockchainInitialCandidate(t *testing.T) {
require := require.New(t)
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.NumCandidates = 2
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.Nil(err)
sf.AddActionHandlers(account.NewProtocol(), vote.NewProtocol(nil))
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
// TODO: change the value when Candidates size is changed
height, err := sf.Height()
require.NoError(err)
require.Equal(uint64(0), height)
candidate, err := sf.CandidatesByHeight(height)
require.NoError(err)
require.True(len(candidate) == 2)
}
func TestCoinbaseTransfer(t *testing.T) {
require := require.New(t)
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
sf.AddActionHandlers(account.NewProtocol())
require.Nil(err)
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
height := bc.TipHeight()
require.Equal(0, int(height))
actionMap := make(map[string][]action.SealedEnvelope)
blk, err := bc.MintNewBlock(
actionMap,
ta.Keyinfo["alfa"].PubKey,
ta.Keyinfo["alfa"].PriKey,
ta.Addrinfo["alfa"].Bech32(),
0,
)
require.Nil(err)
s, err := bc.StateByAddr(ta.Addrinfo["alfa"].Bech32())
require.Nil(err)
require.Equal(big.NewInt(0), s.Balance)
require.Nil(bc.ValidateBlock(blk, true))
require.Nil(bc.CommitBlock(blk))
height = bc.TipHeight()
require.True(height == 1)
require.True(len(blk.Actions) == 1)
s, err = bc.StateByAddr(ta.Addrinfo["alfa"].Bech32())
require.Nil(err)
require.Equal(Gen.BlockReward, s.Balance)
}
func TestBlockchain_StateByAddr(t *testing.T) {
require := require.New(t)
cfg := config.Default
// disable account-based testing
// create chain
bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(bc.Start(context.Background()))
require.NotNil(bc)
s, err := bc.StateByAddr(Gen.CreatorAddr())
require.NoError(err)
require.Equal(uint64(0), s.Nonce)
bal := big.NewInt(7700000000)
require.Equal(bal.Mul(bal, big.NewInt(1e18)).String(), s.Balance.String())
require.Equal(hash.ZeroHash32B, s.Root)
require.Equal([]byte(nil), s.CodeHash)
require.Equal(false, s.IsCandidate)
require.Equal(big.NewInt(0), s.VotingWeight)
require.Equal("", s.Votee)
}
func TestBlocks(t *testing.T) {
// This test is used for benchmarking block commit and verification
t.Skip()
require := require.New(t)
cfg := config.Default
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := ta.Addrinfo["alfa"].Bech32()
priKeyA := ta.Keyinfo["alfa"].PriKey
c := ta.Addrinfo["bravo"].Bech32()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = account.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = account.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
ProducerAddr: ta.Addrinfo["producer"].Bech32(),
GasLimit: &gasLimit,
EnableGasCharge: testutil.EnableGasCharge,
})
_, _, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
for i := 0; i < 10; i++ {
acts := []action.SealedEnvelope{}
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[a] = []action.SealedEnvelope{}
for i := 0; i < 1000; i++ {
tsf, err := testutil.SignedTransfer(a, c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
require.NoError(err)
acts = append(acts, tsf)
actionMap[a] = append(actionMap[a], tsf)
}
blk, _ := bc.MintNewBlock(
actionMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
require.Nil(bc.ValidateBlock(blk, true))
require.Nil(bc.CommitBlock(blk))
}
}
func TestActions(t *testing.T) {
// This test is used for benchmarking block verification
t.Skip()
require := require.New(t)
cfg := config.Default
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := ta.Addrinfo["alfa"].Bech32()
priKeyA := ta.Keyinfo["alfa"].PriKey
c := ta.Addrinfo["bravo"].Bech32()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = account.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = account.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
ProducerAddr: ta.Addrinfo["producer"].Bech32(),
GasLimit: &gasLimit,
EnableGasCharge: testutil.EnableGasCharge,
})
_, _, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
val := &validator{sf: sf, validatorAddr: ""}
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
actionMap := make(map[string][]action.SealedEnvelope)
for i := 0; i < 5000; i++ {
tsf, err := testutil.SignedTransfer(a, c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf)
vote, err := testutil.SignedVote(a, a, priKeyA, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
require.NoError(err)
actionMap[a] = append(actionMap[a], vote)
}
blk, _ := bc.MintNewBlock(
actionMap,
ta.Keyinfo["producer"].PubKey,
ta.Keyinfo["producer"].PriKey,
ta.Addrinfo["producer"].Bech32(),
0,
)
require.Nil(val.Validate(blk, 0, blk.PrevHash(), true))
}
func TestStartExistingBlockchain(t *testing.T) {
require := require.New(t)
ctx := context.Background()
testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
// Disable block reward to make bookkeeping easier
Gen.BlockReward = big.NewInt(0)
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NotNil(bc)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc))
sf.AddActionHandlers(vote.NewProtocol(bc))
require.NoError(bc.Start(ctx))
defer func() {
require.NoError(bc.Stop(ctx))
}()
sf.AddActionHandlers(account.NewProtocol())
defer func() {
require.NoError(sf.Stop(ctx))
require.NoError(bc.Stop(ctx))
testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
}()
require.NoError(addTestingTsfBlocks(bc))
require.True(5 == bc.TipHeight())
// delete state db and recover to tip
testutil.CleanupPath(t, testTriePath)
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
require.NoError(sf.Start(context.Background()))
sf.AddActionHandlers(account.NewProtocol())
sf.AddActionHandlers(vote.NewProtocol(bc))
chain, ok := bc.(*blockchain)
require.True(ok)
chain.sf = sf
require.NoError(chain.startExistingBlockchain(0))
height, _ := chain.sf.Height()
require.Equal(bc.TipHeight(), height)
// recover to height 3
testutil.CleanupPath(t, testTriePath)
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
require.NoError(sf.Start(context.Background()))
sf.AddActionHandlers(account.NewProtocol())
sf.AddActionHandlers(vote.NewProtocol(bc))
chain.sf = sf
require.NoError(chain.startExistingBlockchain(3))
height, _ = chain.sf.Height()
require.Equal(bc.TipHeight(), height)
require.True(3 == height)
}
func addCreatorToFactory(sf factory.Factory) error {
ws, err := sf.NewWorkingSet()
if err != nil {
return err
}
if _, err = account.LoadOrCreateAccount(ws, ta.Addrinfo["producer"].Bech32(), Gen.TotalSupply); err != nil {
return err
}
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
ProducerAddr: ta.Addrinfo["producer"].Bech32(),
GasLimit: &gasLimit,
EnableGasCharge: testutil.EnableGasCharge,
})
if _, _, err = ws.RunActions(ctx, 0, nil); err != nil {
return err
}
return sf.Commit(ws)
}
| 1 | 14,714 | line is 161 characters (from `lll`) | iotexproject-iotex-core | go |
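The `lll` linter flags source lines longer than the configured limit (161 characters here). A minimal sketch, in Go, of how one of the long `testutil.SignedTransfer` calls in the file above could be wrapped to satisfy the linter; which exact line was flagged is an assumption, since the flagged diff line is not shown, and the call signature is taken as-is from the test.

// Same call as in addTestingTsfBlocks, reflowed so that no single
// line exceeds the lint limit; arguments are unchanged.
tsf1, err := testutil.SignedTransfer(
	addr0, addr1, priKey0, 1,
	big.NewInt(20), []byte{},
	testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice),
)
if err != nil {
	return err
}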
@@ -19,13 +19,14 @@
package org.apache.iceberg.catalog;
+import java.io.Serializable;
import java.util.Arrays;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
/**
* A namespace in a {@link Catalog}.
*/
-public class Namespace {
+public class Namespace implements Serializable {
private static final Namespace EMPTY_NAMESPACE = new Namespace(new String[] {});
private static final Joiner DOT = Joiner.on('.');
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.catalog;
import java.util.Arrays;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
/**
* A namespace in a {@link Catalog}.
*/
public class Namespace {
private static final Namespace EMPTY_NAMESPACE = new Namespace(new String[] {});
private static final Joiner DOT = Joiner.on('.');
public static Namespace empty() {
return EMPTY_NAMESPACE;
}
public static Namespace of(String... levels) {
if (levels.length == 0) {
return empty();
}
return new Namespace(levels);
}
private final String[] levels;
private Namespace(String[] levels) {
this.levels = levels;
}
public String[] levels() {
return levels;
}
public String level(int pos) {
return levels[pos];
}
public boolean isEmpty() {
return levels.length == 0;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
Namespace namespace = (Namespace) other;
return Arrays.equals(levels, namespace.levels);
}
@Override
public int hashCode() {
return Arrays.hashCode(levels);
}
@Override
public String toString() {
return DOT.join(levels);
}
}
| 1 | 30,796 | Are these changes needed? | apache-iceberg | java |
@@ -78,12 +78,12 @@ public final class UserUtils {
}
try {
- Path dir = Paths.get(fileName).getParent();
+ final Path dir = Paths.get(fileName).getParent();
if (!dirToFilesMap.containsKey(dir)) {
- // There is not entry for this directory, create a watchkey
+ // There is no entry for this directory, create a watchkey
WatchKey watchKey = dir.register(watchService,
new WatchEvent.Kind[]{StandardWatchEventKinds.ENTRY_MODIFY},
- SensitivityWatchEventModifier.HIGH);
+ SensitivityWatchEventModifier.LOW);
keys.put(watchKey, dir);
}
// Add the config file to dir map
| 1 | package azkaban.user;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.sun.nio.file.SensitivityWatchEventModifier;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class UserUtils {
private static final Logger log = LoggerFactory.getLogger(UserUtils.class);
private UserUtils() {
}
/**
* @return - Returns true if the given user is an ADMIN, or if user has the required permission
* for the action requested.
*/
public static boolean hasPermissionforAction(final UserManager userManager, final User user,
final Permission.Type type) {
for (final String roleName : user.getRoles()) {
final Role role = userManager.getRole(roleName);
final Permission perm = role.getPermission();
if (perm.isPermissionSet(Permission.Type.ADMIN) || perm.isPermissionSet(type)) {
return true;
}
}
return false;
}
/**
* Creates a watch thread which listens to specified files' modification and reloads
* configurations
*/
static void setupWatch(final Map<String, ParseConfigFile> configFileMap) throws IOException {
Preconditions.checkNotNull(configFileMap);
Preconditions.checkArgument(configFileMap.size() > 0);
final WatchService watchService;
try {
watchService = FileSystems.getDefault().newWatchService();
} catch (IOException e) {
log.warn(" Failed to create WatchService " + e.getMessage());
throw e;
}
// Map to store WatchKey to Dir mapping
final Map<WatchKey, Path> keys = new HashMap<>();
// A directory to config files multimap
final Multimap<Path, String> dirToFilesMap = HashMultimap.create();
// Iterate over each file.
for (Map.Entry<String, ParseConfigFile> entry : configFileMap.entrySet()) {
String fileName = entry.getKey();
ParseConfigFile parser = entry.getValue();
Preconditions.checkNotNull(fileName);
Preconditions.checkNotNull(parser);
final File file = new File(fileName);
if (!file.exists()) {
log.warn("Failed to setup watch service, user provided file " + fileName + " does not "
+ "exist.");
continue;
}
try {
Path dir = Paths.get(fileName).getParent();
if (!dirToFilesMap.containsKey(dir)) {
// There is not entry for this directory, create a watchkey
WatchKey watchKey = dir.register(watchService,
new WatchEvent.Kind[]{StandardWatchEventKinds.ENTRY_MODIFY},
SensitivityWatchEventModifier.HIGH);
keys.put(watchKey, dir);
}
// Add the config file to dir map
dirToFilesMap.put(dir, fileName);
} catch (IOException e) {
// Ignore the IOException
log.warn("IOException while setting up watch on conf " + fileName + ". "
+ e.getMessage());
}
}
// Return if WatchService is not initialized
if (keys.size() == 0) {
log.warn("Watchservice was not setup for any config file(s).");
try {
watchService.close();
} catch (IOException e) {
log.warn("IOException while closing watchService. " + e.getMessage());
}
return;
}
Runnable runnable = () -> {
// Watchservice is established, now listen for the events till eternity!
for (;; ) {
WatchKey watchKey;
try {
watchKey = watchService.take();
} catch (InterruptedException ie) {
log.warn(ie.getMessage());
Thread.currentThread().interrupt();
return;
}
// Get the directory for which watch service event triggered.
Path dir = keys.get(watchKey);
for (WatchEvent<?> event : watchKey.pollEvents()) {
// Make sure the modification happened to user config file
@SuppressWarnings("unchecked")
final Path name = ((WatchEvent<Path>) event).context();
final String filename = dir.resolve(name).toString();
// Lookup the file in dirToFilesMap
if (dirToFilesMap.containsEntry(dir, filename)) {
// Match!
// reparse the config file
log.info("Modification detected, reloading config file " + filename);
configFileMap.get(filename).parseConfigFile();
break;
}
}
watchKey.reset();
}
};
final Thread thread = new Thread(runnable);
log.info("Starting configuration watching thread.");
thread.start();
}
}
| 1 | 17,969 | It seems like reducing the sensitivity would make the test failure less likely to occur, but not fix the issue? | azkaban-azkaban | java |
@@ -3,7 +3,9 @@
namespace Thelia\Model;
use Thelia\Model\Base\CategoryDocumentI18n as BaseCategoryDocumentI18n;
+use Thelia\Model\Tools\I18nTimestampableTrait;
class CategoryDocumentI18n extends BaseCategoryDocumentI18n
{
+ use I18nTimestampableTrait;
}
| 1 | <?php
namespace Thelia\Model;
use Thelia\Model\Base\CategoryDocumentI18n as BaseCategoryDocumentI18n;
class CategoryDocumentI18n extends BaseCategoryDocumentI18n
{
}
| 1 | 10,697 | missing use statement for importing the full namespace | thelia-thelia | php |
@@ -35,6 +35,9 @@ var ErrInsufficientHosts = &shared.InternalServiceError{Message: "Not enough hos
// ErrListenerAlreadyExist is thrown on a duplicate AddListener call from the same listener
var ErrListenerAlreadyExist = errors.New("Listener already exist for the service")
+// ErrIncorrectAddressFormat is thrown on incorrect address format
+var ErrIncorrectAddressFormat = errors.New("Incorrect address format")
+
type (
// ChangedEvent describes a change in membership
| 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package membership
import (
"errors"
"github.com/temporalio/temporal/.gen/go/shared"
)
// ErrUnknownService is thrown for a service that is not tracked by this instance
var ErrUnknownService = errors.New("Service not tracked by Monitor")
// ErrInsufficientHosts is thrown when there are not enough hosts to serve the request
var ErrInsufficientHosts = &shared.InternalServiceError{Message: "Not enough hosts to serve the request"}
// ErrListenerAlreadyExist is thrown on a duplicate AddListener call from the same listener
var ErrListenerAlreadyExist = errors.New("Listener already exist for the service")
type (
// ChangedEvent describes a change in membership
ChangedEvent struct {
HostsAdded []*HostInfo
HostsUpdated []*HostInfo
HostsRemoved []*HostInfo
}
// Monitor provides membership information for all cadence services.
// It can be used to query which member host of a service is responsible for serving a given key.
Monitor interface {
Start() error
Stop()
WhoAmI() (*HostInfo, error)
Lookup(service string, key string) (*HostInfo, error)
GetResolver(service string) (ServiceResolver, error)
// AddListener adds a listener for this service.
// The listener will get notified on the given
// channel, whenever there is a membership change.
// @service: The service to be listened on
// @name: The name for identifying the listener
// @notifyChannel: The channel on which the caller receives notifications
AddListener(service string, name string, notifyChannel chan<- *ChangedEvent) error
// RemoveListener removes a listener for this service.
RemoveListener(service string, name string) error
}
// ServiceResolver provides membership information for a specific cadence service.
// It can be used to resolve which member host is responsible for serving a given key.
ServiceResolver interface {
Lookup(key string) (*HostInfo, error)
// AddListener adds a listener which will get notified on the given
// channel, whenever membership changes.
// @name: The name for identifying the listener
// @notifyChannel: The channel on which the caller receives notifications
AddListener(name string, notifyChannel chan<- *ChangedEvent) error
// RemoveListener removes a listener for this service.
RemoveListener(name string) error
}
)
| 1 | 9,059 | Generally error messages in Go shouldn't start with capital letter. I see this rule is broken in many places here. Why? | temporalio-temporal | go |
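The review comment refers to the Go convention that error strings should start lowercase and carry no trailing punctuation, because callers usually wrap them into longer messages. A minimal sketch of the sentinel errors above restated under that convention; ErrInsufficientHosts is left out only because it wraps a generated thrift type, and everything else reuses the identifiers from the file above.

package membership

import "errors"

// Error strings lowercased per Go convention: they are typically
// embedded in larger messages such as "add listener failed: ...".
var (
	// ErrUnknownService is returned for a service not tracked by this Monitor.
	ErrUnknownService = errors.New("service not tracked by monitor")
	// ErrListenerAlreadyExist is returned on a duplicate AddListener call from the same listener.
	ErrListenerAlreadyExist = errors.New("listener already exists for the service")
	// ErrIncorrectAddressFormat is returned on an incorrect address format.
	ErrIncorrectAddressFormat = errors.New("incorrect address format")
)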
@@ -285,7 +285,7 @@ describe('Core_selection', () => {
selectCell(0, 0);
keyDownUp('arrow_left');
- expect(getSelected()).toEqual([[0, 0, 0, 0]]);
+ expect(getSelected()).toEqual([[4, 4, 4, 4]]);
});
it('should fix start range if provided is out of bounds (to the top)', () => {
| 1 | describe('Core_selection', () => {
var id = 'testContainer';
beforeEach(function() {
this.$container = $(`<div id="${id}"></div>`).appendTo('body');
});
afterEach(function() {
if (this.$container) {
destroy();
this.$container.remove();
}
});
it('should correctly render the selection using event simulation', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(9, 8),
selectionMode: 'multiple',
colHeaders: true,
rowHeaders: true,
});
$(getCell(5, 4)).simulate('mousedown');
$(getCell(1, 1)).simulate('mouseover');
$(getCell(1, 1)).simulate('mouseup');
keyDown('ctrl');
$(getCell(0, 2)).simulate('mousedown');
$(getCell(8, 2)).simulate('mouseover');
$(getCell(7, 2)).simulate('mouseup');
$(getCell(2, 4)).simulate('mousedown');
$(getCell(2, 4)).simulate('mouseover');
$(getCell(2, 4)).simulate('mouseup');
$(getCell(7, 6)).simulate('mousedown');
$(getCell(8, 7)).simulate('mouseover');
$(getCell(8, 7)).simulate('mouseup');
expect(`
| ║ : - : - : - : - : : - : - |
|===:===:===:===:===:===:===:===:===|
| - ║ : : 0 : : : : : |
| - ║ : 0 : 1 : 0 : 0 : : : |
| - ║ : 0 : 1 : 0 : 1 : : : |
| - ║ : 0 : 1 : 0 : 0 : : : |
| - ║ : 0 : 1 : 0 : 0 : : : |
| - ║ : 0 : 1 : 0 : 0 : : : |
| - ║ : : 0 : : : : : |
| - ║ : : 0 : : : : A : 0 |
| - ║ : : 0 : : : : 0 : 0 |
`).toBeMatchToSelectionPattern();
});
it('should focus external textarea when clicked during editing', () => {
var textarea = $('<input type="text">').prependTo($('body'));
handsontable();
selectCell(0, 0);
keyDown('enter');
// $("html").triggerHandler('mouseup');
$('html').simulate('mouseup');
textarea.focus();
expect(document.activeElement).toBe(textarea[0]);
textarea.remove();
});
it('should deselect currently selected cell', () => {
handsontable();
selectCell(0, 0);
$('html').simulate('mousedown');
expect(getSelected()).toBeUndefined();
});
it('should not deselect the currently selected cell after clicking on a scrollbar', () => {
var hot = handsontable({
outsideClickDeselects: false,
minRows: 20,
minCols: 2,
width: 400,
height: 100
});
selectCell(0, 0);
var holderBoundingBox = hot.view.wt.wtTable.holder.getBoundingClientRect(),
verticalScrollbarCoords = {
x: holderBoundingBox.left + holderBoundingBox.width - 3,
y: holderBoundingBox.top + (holderBoundingBox.height / 2)
},
horizontalScrollbarCoords = {
x: holderBoundingBox.left + (holderBoundingBox.width / 2),
y: holderBoundingBox.top + holderBoundingBox.height - 3
};
$(hot.view.wt.wtTable.holder).simulate('mousedown', {
clientX: verticalScrollbarCoords.x,
clientY: verticalScrollbarCoords.y
});
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
$(hot.view.wt.wtTable.holder).simulate('mousedown', {
clientX: horizontalScrollbarCoords.x,
clientY: horizontalScrollbarCoords.y
});
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
});
it('should not deselect currently selected cell', () => {
handsontable({
outsideClickDeselects: false
});
selectCell(0, 0);
$('html').simulate('mousedown');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
});
it('should allow to focus on external input and hold current selection information', () => {
var textarea = $('<input id="test_textarea" type="text">').prependTo($('body'));
handsontable({
outsideClickDeselects: false
});
selectCell(0, 0);
textarea.simulate('mousedown');
textarea.focus();
expect(document.activeElement.id).toEqual('test_textarea');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
textarea.remove();
});
it('should allow to type in external input while holding current selection information', () => {
var textarea = $('<textarea id="test_textarea"></textarea>').prependTo($('body'));
var keyPressed;
handsontable({
outsideClickDeselects: false
});
selectCell(0, 0);
textarea.focus();
textarea.simulate('mousedown');
textarea.simulate('mouseup');
textarea.on('keydown', (event) => {
keyPressed = event.keyCode;
});
var LETTER_A_KEY = 97;
$(document.activeElement).simulate('keydown', {
keyCode: LETTER_A_KEY
});
// textarea should receive the event and be an active element
expect(keyPressed).toEqual(LETTER_A_KEY);
expect(document.activeElement).toBe(document.getElementById('test_textarea'));
// should preserve selection, close editor and save changes
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
expect(getDataAtCell(0, 0)).toBeNull();
textarea.remove();
});
it('should allow to type in external input after opening cell editor', () => {
var textarea = $('<textarea id="test_textarea"></textarea>').prependTo($('body'));
var keyPressed;
handsontable({
outsideClickDeselects: false
});
selectCell(0, 0);
keyDown('enter');
document.activeElement.value = 'Foo';
textarea.focus();
textarea.simulate('mousedown');
textarea.simulate('mouseup');
textarea.on('keydown', (event) => {
keyPressed = event.keyCode;
});
var LETTER_A_KEY = 97;
$(document.activeElement).simulate('keydown', {
keyCode: LETTER_A_KEY
});
// textarea should receive the event and be an active element
expect(keyPressed).toEqual(LETTER_A_KEY);
expect(document.activeElement).toBe(document.getElementById('test_textarea'));
// should preserve selection, close editor and save changes
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
expect(getDataAtCell(0, 0)).toEqual('Foo');
textarea.remove();
});
it('should deselect on outside click if outsideClickDeselects is a function that returns true', () => {
var textarea = $('<textarea id="test_textarea"></textarea>').prependTo($('body'));
var keyPressed;
handsontable({
outsideClickDeselects: () => true,
});
selectCell(0, 0);
keyDown('enter');
document.activeElement.value = 'Foo';
textarea.focus();
textarea.simulate('mousedown');
textarea.simulate('mouseup');
textarea.on('keydown', (event) => {
keyPressed = event.keyCode;
});
var LETTER_A_KEY = 97;
$(document.activeElement).simulate('keydown', {
keyCode: LETTER_A_KEY
});
// textarea should receive the event and be an active element
expect(keyPressed).toEqual(LETTER_A_KEY);
expect(document.activeElement).toBe(document.getElementById('test_textarea'));
// should NOT preserve selection
expect(getSelected()).toBeUndefined();
expect(getDataAtCell(0, 0)).toEqual('Foo');
textarea.remove();
});
it('should not deselect on outside click if outsideClickDeselects is a function that returns false', () => {
var textarea = $('<textarea id="test_textarea"></textarea>').prependTo($('body'));
var keyPressed;
handsontable({
outsideClickDeselects: () => false,
});
selectCell(0, 0);
keyDown('enter');
document.activeElement.value = 'Foo';
textarea.focus();
textarea.simulate('mousedown');
textarea.simulate('mouseup');
textarea.on('keydown', (event) => {
keyPressed = event.keyCode;
});
var LETTER_A_KEY = 97;
$(document.activeElement).simulate('keydown', {
keyCode: LETTER_A_KEY
});
// textarea should receive the event and be an active element
expect(keyPressed).toEqual(LETTER_A_KEY);
expect(document.activeElement).toBe(document.getElementById('test_textarea'));
// should preserve selection, close editor and save changes
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
expect(getDataAtCell(0, 0)).toEqual('Foo');
textarea.remove();
});
it('should fix start range if provided is out of bounds (to the left)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 0);
keyDownUp('arrow_left');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
});
it('should fix start range if provided is out of bounds (to the top)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 0);
keyDownUp('arrow_up');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
});
it('should fix start range if provided is out of bounds (to the right)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 4);
keyDownUp('arrow_right');
expect(getSelected()).toEqual([[0, 4, 0, 4]]);
});
it('should fix start range if provided is out of bounds (to the bottom)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(4, 0);
keyDownUp('arrow_down');
expect(getSelected()).toEqual([[4, 0, 4, 0]]);
});
it('should fix end range if provided is out of bounds (to the left)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 1);
keyDownUp('shift+arrow_left');
keyDownUp('shift+arrow_left');
expect(getSelected()).toEqual([[0, 1, 0, 0]]);
});
it('should fix end range if provided is out of bounds (to the top)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(1, 0);
keyDownUp('shift+arrow_up');
keyDownUp('shift+arrow_up');
expect(getSelected()).toEqual([[1, 0, 0, 0]]);
});
it('should fix end range if provided is out of bounds (to the right)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 3);
keyDownUp('shift+arrow_right');
keyDownUp('shift+arrow_right');
expect(getSelected()).toEqual([[0, 3, 0, 4]]);
});
it('should fix end range if provided is out of bounds (to the bottom)', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(3, 0);
keyDownUp('shift+arrow_down');
keyDownUp('shift+arrow_down');
keyDownUp('shift+arrow_down');
expect(getSelected()).toEqual([[3, 0, 4, 0]]);
});
it('should select multiple cells', () => {
handsontable({
startRows: 5,
startCols: 5
});
selectCell(3, 0, 4, 1);
expect(getSelected()).toEqual([[3, 0, 4, 1]]);
});
it('should call onSelectionEnd as many times as onSelection when `selectCell` is called', () => {
var tick = 0,
tickEnd = 0;
handsontable({
startRows: 5,
startCols: 5,
afterSelection() {
tick++;
},
afterSelectionEnd() {
tickEnd++;
}
});
selectCell(3, 0);
selectCell(1, 1);
expect(tick).toEqual(2);
expect(tickEnd).toEqual(2);
});
it('should call onSelectionEnd when user finishes selection by releasing SHIFT key (3 times)', () => {
var tick = 0;
handsontable({
startRows: 5,
startCols: 5,
afterSelectionEnd() {
tick++;
}
});
selectCell(3, 0); // makes tick++
keyDownUp('shift+arrow_down'); // makes tick++
keyDownUp('shift+arrow_down'); // makes tick++
keyDownUp('shift+arrow_down'); // makes tick++
expect(getSelected()).toEqual([[3, 0, 4, 0]]);
expect(tick).toEqual(4);
});
it('should call onSelectionEnd when user finishes selection by releasing SHIFT key (1 time)', () => {
var tick = 0;
handsontable({
startRows: 5,
startCols: 5,
afterSelectionEnd() {
tick++;
}
});
selectCell(3, 0); // makes tick++
keyDown('shift+arrow_down');
keyDown('shift+arrow_down');
keyDownUp('shift+arrow_down'); // makes tick++
expect(getSelected()).toEqual([[3, 0, 4, 0]]);
expect(tick).toEqual(2);
});
it('should select columns by click on header with SHIFT key', function() {
handsontable({
startRows: 5,
startCols: 5,
colHeaders: true
});
this.$container.find('.ht_clone_top tr:eq(0) th:eq(1)').simulate('mousedown');
this.$container.find('.ht_clone_top tr:eq(0) th:eq(1)').simulate('mouseup');
this.$container.find('.ht_clone_top tr:eq(0) th:eq(4)').simulate('mousedown', {shiftKey: true});
this.$container.find('.ht_clone_top tr:eq(0) th:eq(4)').simulate('mouseup');
expect(getSelected()).toEqual([[0, 1, 4, 4]]);
});
it('should select rows by click on header with SHIFT key', function() {
handsontable({
startRows: 5,
startCols: 5,
rowHeaders: true
});
this.$container.find('.ht_clone_left tr:eq(1) th:eq(0)').simulate('mousedown');
this.$container.find('.ht_clone_left tr:eq(1) th:eq(0)').simulate('mouseup');
this.$container.find('.ht_clone_left tr:eq(4) th:eq(0)').simulate('mousedown', {shiftKey: true});
this.$container.find('.ht_clone_left tr:eq(4) th:eq(0)').simulate('mouseup');
expect(getSelected()).toEqual([[1, 0, 4, 4]]);
});
it('should change selection after click on row header with SHIFT key', function() {
handsontable({
startRows: 5,
startCols: 5,
rowHeaders: true
});
selectCell(1, 1, 3, 3);
this.$container.find('.ht_clone_left tr:eq(4) th:eq(0)').simulate('mousedown', {shiftKey: true});
this.$container.find('.ht_clone_left tr:eq(4) th:eq(0)').simulate('mouseup');
expect(getSelected()).toEqual([[1, 0, 4, 4]]);
});
it('should change selection after click on column header with SHIFT key', function() {
handsontable({
startRows: 5,
startCols: 5,
colHeaders: true
});
selectCell(1, 1, 3, 3);
this.$container.find('.ht_clone_top tr:eq(0) th:eq(4)').simulate('mousedown', {shiftKey: true});
this.$container.find('.ht_clone_top tr:eq(0) th:eq(4)').simulate('mouseup');
expect(getSelected()).toEqual([[0, 1, 4, 4]]);
});
it('should call onSelection while user selects cells with mouse; onSelectionEnd when user finishes selection', function() {
var tick = 0,
tickEnd = 0;
handsontable({
startRows: 5,
startCols: 5,
afterSelection() {
tick++;
},
afterSelectionEnd() {
tickEnd++;
}
});
this.$container.find('tr:eq(0) td:eq(0)').simulate('mousedown');
this.$container.find('tr:eq(0) td:eq(1)').simulate('mouseover');
this.$container.find('tr:eq(1) td:eq(3)').simulate('mouseover');
this.$container.find('tr:eq(1) td:eq(3)').simulate('mouseup');
expect(getSelected()).toEqual([[0, 0, 1, 3]]);
expect(tick).toEqual(3);
expect(tickEnd).toEqual(1);
});
it('should properly select columns, when the user moves the cursor over column headers across two overlays', function() {
handsontable({
startRows: 5,
startCols: 5,
colHeaders: true,
fixedColumnsLeft: 2
});
this.$container.find('.ht_clone_left tr:eq(0) th:eq(1)').simulate('mousedown');
this.$container.find('.ht_clone_left tr:eq(0) th:eq(1)').simulate('mouseover');
this.$container.find('.ht_clone_top tr:eq(0) th:eq(2)').simulate('mouseover');
this.$container.find('.ht_clone_left tr:eq(0) th:eq(1)').simulate('mouseover');
this.$container.find('.ht_clone_left tr:eq(0) th:eq(1)').simulate('mouseup');
expect(getSelected()).toEqual([[0, 1, 4, 1]]);
});
it('should move focus to selected cell', () => {
var $input = $('<input>').appendTo(document.body);
handsontable({
startRows: 5,
startCols: 5
});
$input[0].focus();
selectCell(0, 0);
keyDownUp('enter');
expect(isEditorVisible()).toEqual(true);
$input.remove();
});
// This test should cover the #893 case, but it always passes. It seems like the keydown event (with CTRL key pressed) isn't delivered.
it('should not move focus from outside elements on CTRL keydown event, when no cell is selected', () => {
var $input = $('<input type="text"/>');
$('body').append($input);
handsontable();
selectCell(0, 0);
expect(document.activeElement.nodeName).toBeInArray(['TEXTAREA', 'BODY', 'HTML']);
$input.focus();
expect(document.activeElement.nodeName).toBe('INPUT');
// var keyDownEvent = $.Event('keydown', {ctrlKey: true, metaKey: true});
// $input.trigger(keyDownEvent);
$input.simulate('keydown', {ctrlKey: true, metaKey: true});
expect(document.activeElement.nodeName).toBe('INPUT');
$input.remove();
});
it('should select the entire column after column header is clicked', () => {
handsontable({
width: 200,
height: 100,
startRows: 10,
startCols: 5,
colHeaders: true
});
spec().$container.find('thead th:eq(0)').simulate('mousedown');
expect(getSelected()).toEqual([[0, 0, 9, 0]]);
expect(`
| * : : : : |
|===:===:===:===:===|
| A : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
| 0 : : : : |
`).toBeMatchToSelectionPattern();
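    // Legend for the ASCII selection patterns used throughout these specs
    // (inferred from their usage in this file, not an official reference):
    // '║' and '===' separate row/column headers from cells, 'A' marks the
    // selection anchor cell, digits mark selection-layer depth, '*' a fully
    // selected header, '-' a partially selected header, '#' a lone current cell.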
});
it('should select the entire column and row after column header and row header is clicked', () => {
handsontable({
width: 200,
height: 100,
startRows: 10,
startCols: 5,
colHeaders: true,
rowHeaders: true,
});
spec().$container.find('thead th:eq(3)').simulate('mousedown');
keyDown('ctrl');
spec().$container.find('tr:eq(2) th:eq(0)').simulate('mousedown');
expect(`
| ║ - : - : * : - : - |
|===:===:===:===:===:===|
| - ║ : : 0 : : |
| * ║ A : 0 : 1 : 0 : 0 |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
| - ║ : : 0 : : |
`).toBeMatchToSelectionPattern();
});
it('should not overwrite background color of the cells with custom CSS classes', function() {
var hot = handsontable({
width: 300,
height: 150,
startRows: 5,
startCols: 5,
cells: (row, col) => (row === 1 && col === 1 ? {className: 'red-background'} : void 0)
});
$(getCell(0, 0)).simulate('mousedown');
$(getCell(4, 4)).simulate('mouseover');
$(getCell(4, 4)).simulate('mouseup');
expect(window.getComputedStyle(getCell(1, 1))['background-color']).toBe('rgb(255, 0, 0)');
});
it('should select the entire column after column header is clicked (in fixed rows/cols corner)', function() {
var hot = handsontable({
width: 200,
height: 100,
startRows: 10,
startCols: 5,
colHeaders: true,
rowHeaders: true,
fixedRowsTop: 2,
fixedColumnsLeft: 2
});
this.$container.find('.ht_master thead th:eq(1)').simulate('mousedown');
expect(getSelected()).toEqual([[0, 0, 9, 0]]);
expect(`
| ║ * : | : : |
|===:===:===:===:===:===|
| - ║ A : | : : |
| - ║ 0 : | : : |
|---:---:---:---:---:---|
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
| - ║ 0 : | : : |
`).toBeMatchToSelectionPattern();
});
it('should select the entire fixed column after column header is clicked, after scroll horizontally', () => {
const hot = handsontable({
width: 200,
height: 100,
startRows: 10,
startCols: 10,
colHeaders: true,
rowHeaders: true,
fixedColumnsLeft: 2
});
hot.render();
hot.view.wt.scrollHorizontal(20);
spec().$container.find('.ht_master thead th:eq(2)').simulate('mousedown');
spec().$container.find('.ht_master thead th:eq(2)').simulate('mouseup');
expect(getSelected()).toEqual([[0, 1, 9, 1]]);
expect(`
| ║ : * | : : : : : : : |
|===:===:===:===:===:===:===:===:===:===:===|
| - ║ : A | : : : : : : : |
| - ║ : 0 | : : : : : : : |
| - ║ : 0 | : : : : : : : |
| - ║ : 0 | : : : : : : : |
| - ║ : 0 | : : : : : : : |
| - ║ : 0 | : : : : : : : |
`).toBeMatchToSelectionPattern();
});
it('should set the selection end to the first visible row, when dragging the selection from a cell to a column header', (done) => {
var hot = handsontable({
width: 200,
height: 200,
startRows: 20,
startCols: 20,
colHeaders: true,
rowHeaders: true
});
hot.view.wt.scrollVertical(10);
hot.view.wt.scrollHorizontal(10);
hot.render();
setTimeout(() => {
$(getCell(12, 11)).simulate('mousedown');
spec().$container.find('.ht_clone_top thead th:eq(2)').simulate('mouseover');
}, 30);
setTimeout(() => {
expect(getSelected()).toEqual([[12, 11, 10, 11]]);
done();
}, 60);
});
it('should set the selection end to the first visible column, when dragging the selection from a cell to a row header', (done) => {
var hot = handsontable({
width: 200,
height: 200,
startRows: 20,
startCols: 20,
colHeaders: true,
rowHeaders: true
});
hot.view.wt.scrollVertical(10);
hot.view.wt.scrollHorizontal(10);
hot.render();
setTimeout(() => {
$(getCell(12, 11)).simulate('mousedown');
spec().$container.find('.ht_clone_left tbody th:eq(12)').simulate('mouseover');
}, 30);
setTimeout(() => {
expect(getSelected()).toEqual([[12, 11, 12, 10]]);
done();
}, 60);
});
  it('should allow to scroll the table when a whole column is selected and table is longer than its container', function(done) {
var errCount = 0;
$(window).on('error.selectionTest', () => {
errCount++;
});
var onAfterScrollVertically = jasmine.createSpy('onAfterScrollVertically');
var hot = handsontable({
height: 100,
width: 300,
startRows: 100,
startCols: 5,
colHeaders: true,
rowHeaders: true,
afterScrollVertically: onAfterScrollVertically
});
var mainHolder = hot.view.wt.wtTable.holder;
mainHolder.scrollTop = 0;
this.$container.find('thead tr:eq(0) th:eq(2)').simulate('mousedown');
this.$container.find('thead tr:eq(0) th:eq(2)').simulate('mouseup');
mainHolder.scrollTop = 120;
setTimeout(() => {
expect(errCount).toEqual(0); // expect no errors to be thrown
$(window).off('error.selectionTest');
done();
}, 100);
});
it('should scroll to the end of the selection, when selecting cells using the keyboard', () => {
var hot = handsontable({
height: 300,
width: 300,
startRows: 50,
startCols: 50,
colHeaders: true,
rowHeaders: true,
fixedRowsTop: 2,
fixedColumnsLeft: 2
});
var mainHolder = hot.view.wt.wtTable.holder;
mainHolder.scrollTop = 100;
selectCell(1, 3);
keyDownUp('arrow_down');
expect(mainHolder.scrollTop).toEqual(0);
mainHolder.scrollTop = 100;
selectCell(1, 3);
keyDownUp('shift+arrow_down');
expect(mainHolder.scrollTop).toEqual(0);
mainHolder.scrollLeft = 100;
selectCell(3, 1);
keyDownUp('arrow_right');
expect(mainHolder.scrollLeft).toEqual(0);
mainHolder.scrollLeft = 100;
selectCell(3, 1);
keyDownUp('shift+arrow_right');
expect(mainHolder.scrollLeft).toEqual(0);
var lastVisibleColumn = hot.view.wt.wtTable.getLastVisibleColumn();
selectCell(3, lastVisibleColumn);
keyDownUp('arrow_right');
expect(hot.view.wt.wtTable.getLastVisibleColumn()).toEqual(lastVisibleColumn + 1);
keyDownUp('arrow_right');
expect(hot.view.wt.wtTable.getLastVisibleColumn()).toEqual(lastVisibleColumn + 2);
keyDownUp('shift+arrow_right');
expect(hot.view.wt.wtTable.getLastVisibleColumn()).toEqual(lastVisibleColumn + 3);
var lastVisibleRow = hot.view.wt.wtTable.getLastVisibleRow();
selectCell(lastVisibleRow, 3);
keyDownUp('arrow_down');
expect(hot.view.wt.wtTable.getLastVisibleRow()).toEqual(lastVisibleRow + 1);
keyDownUp('arrow_down');
expect(hot.view.wt.wtTable.getLastVisibleRow()).toEqual(lastVisibleRow + 2);
keyDownUp('shift+arrow_down');
expect(hot.view.wt.wtTable.getLastVisibleRow()).toEqual(lastVisibleRow + 3);
});
it('should select the entire row after row header is clicked', function() {
var hot = handsontable({
startRows: 5,
startCols: 5,
colHeaders: true,
rowHeaders: true
});
this.$container.find('tr:eq(2) th:eq(0)').simulate('mousedown');
expect(getSelected()).toEqual([[1, 0, 1, 4]]);
expect(`
| ║ - : - : - : - : - |
|===:===:===:===:===:===|
| ║ : : : : |
| * ║ A : 0 : 0 : 0 : 0 |
| ║ : : : : |
| ║ : : : : |
| ║ : : : : |
`).toBeMatchToSelectionPattern();
});
it('should select the entire row of a partially fixed table after row header is clicked', function() {
handsontable({
startRows: 5,
startCols: 5,
colHeaders: true,
rowHeaders: true,
fixedRowsTop: 2,
fixedColumnsLeft: 2
});
this.$container.find('tr:eq(2) th:eq(0)').simulate('mousedown');
expect(getSelected()).toEqual([[1, 0, 1, 4]]);
this.$container.find('tr:eq(3) th:eq(0)').simulate('mousedown');
expect(getSelected()).toEqual([[2, 0, 2, 4]]);
});
it('should select a cell in a newly added row after automatic row adding, triggered by editing a cell in the last row with minSpareRows > 0, ' +
'unless editing happened within the fixed bottom rows', (done) => {
var hot = handsontable({
startRows: 5,
startCols: 2,
minSpareRows: 1
});
setTimeout(() => {
selectCell(4, 0);
keyDownUp('enter');
}, 10);
setTimeout(() => {
keyDownUp('enter');
}, 100);
setTimeout(() => {
expect(countRows()).toEqual(6);
expect(getSelected()).toEqual([[5, 0, 5, 0]]);
}, 200);
setTimeout(() => {
done();
}, 250);
});
  it('should select a cell that was added automatically by minSpareCols', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(1, 5),
minSpareCols: 1,
});
selectCell(0, 5);
keyDownUp('tab');
expect(countCols()).toEqual(7);
expect(getSelected()).toEqual([[0, 6, 0, 6]]);
expect(getDataAtCell(0, 0)).toEqual('A1');
expect(getDataAtCell(0, 1)).toEqual('B1');
expect(getDataAtCell(0, 2)).toEqual('C1');
expect(getDataAtCell(0, 3)).toEqual('D1');
expect(getDataAtCell(0, 4)).toEqual('E1');
expect(getDataAtCell(0, 5)).toBeNull();
expect(getDataAtCell(0, 6)).toBeNull();
});
it('should change selected coords by modifying coords object via `modifyTransformStart` hook', () => {
var hot = handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 0);
hot.addHook('modifyTransformStart', (coords) => {
coords.col += 1;
coords.row += 1;
});
keyDown('arrow_down');
expect(getSelected()).toEqual([[2, 1, 2, 1]]);
});
it('should change selected coords by modifying coords object via `modifyTransformEnd` hook', () => {
var hot = handsontable({
startRows: 5,
startCols: 5
});
selectCell(0, 0);
hot.addHook('modifyTransformEnd', (coords) => {
coords.col += 2;
coords.row += 1;
});
keyDown('shift+arrow_down');
expect(getSelected()).toEqual([[0, 0, 2, 2]]);
});
  it('should indicate if coords are out of bounds via `afterModifyTransformStart` hook', () => {
var spy = jasmine.createSpy();
var hot = handsontable({
startRows: 5,
startCols: 5
});
hot.addHook('afterModifyTransformStart', spy);
selectCell(2, 0);
keyDownUp('arrow_left');
expect(spy.calls.mostRecent().args[1]).toBe(0);
expect(spy.calls.mostRecent().args[2]).toBe(-1);
spy.calls.reset();
selectCell(2, 4);
keyDownUp('arrow_right');
expect(spy.calls.mostRecent().args[1]).toBe(0);
expect(spy.calls.mostRecent().args[2]).toBe(1);
spy.calls.reset();
selectCell(4, 2);
keyDownUp('arrow_down');
expect(spy.calls.mostRecent().args[1]).toBe(1);
expect(spy.calls.mostRecent().args[2]).toBe(0);
spy.calls.reset();
selectCell(0, 2);
keyDownUp('arrow_up');
expect(spy.calls.mostRecent().args[1]).toBe(-1);
expect(spy.calls.mostRecent().args[2]).toBe(0);
});
  it('should indicate if coords are out of bounds via `afterModifyTransformEnd` hook', () => {
var spy = jasmine.createSpy();
var hot = handsontable({
startRows: 5,
startCols: 5
});
hot.addHook('afterModifyTransformEnd', spy);
selectCell(2, 0);
keyDownUp('shift+arrow_left');
expect(spy.calls.mostRecent().args[1]).toBe(0);
expect(spy.calls.mostRecent().args[2]).toBe(-1);
spy.calls.reset();
selectCell(2, 4);
keyDownUp('shift+arrow_right');
expect(spy.calls.mostRecent().args[1]).toBe(0);
expect(spy.calls.mostRecent().args[2]).toBe(1);
spy.calls.reset();
selectCell(4, 2);
keyDownUp('shift+arrow_down');
expect(spy.calls.mostRecent().args[1]).toBe(1);
expect(spy.calls.mostRecent().args[2]).toBe(0);
spy.calls.reset();
selectCell(0, 2);
keyDownUp('shift+arrow_up');
expect(spy.calls.mostRecent().args[1]).toBe(-1);
expect(spy.calls.mostRecent().args[2]).toBe(0);
});
  it('should change selection after a left mouse button click on one of the selected cells', () => {
var hot = handsontable({
startRows: 5,
startCols: 5
});
var cells = $('.ht_master.handsontable td');
cells.eq(6).simulate('mousedown');
cells.eq(18).simulate('mouseover');
cells.eq(18).simulate('mouseup');
expect(hot.getSelected()).toEqual([[1, 1, 3, 3]]);
cells.eq(16).simulate('mousedown');
cells.eq(16).simulate('mouseup');
expect(hot.getSelected()).toEqual([[3, 1, 3, 1]]);
});
it('should select the first row after corner header is clicked', () => {
handsontable({
startRows: 5,
startCols: 5,
colHeaders: true,
rowHeaders: true
});
spec().$container.find('thead').find('th').eq(0).simulate('mousedown');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
expect(`
| ║ - : : : : |
|===:===:===:===:===:===|
| - ║ # : : : : |
| ║ : : : : |
| ║ : : : : |
| ║ : : : : |
| ║ : : : : |
`).toBeMatchToSelectionPattern();
});
it('should redraw selection when option `colHeaders` is set and user scrolled', function (done) {
var hot = handsontable({
startRows: 20,
startCols: 20,
colHeaders: true,
rowHeaders: true,
width: 400,
height: 200
});
var cellVerticalPosition;
var borderOffsetInPixels = 1;
var topBorder;
selectCell(5, 5);
hot.view.wt.wtOverlays.topOverlay.scrollTo(2);
setTimeout(function () {
cellVerticalPosition = hot.getCell(5, 5).offsetTop;
topBorder = $('.wtBorder.current')[0];
expect(topBorder.offsetTop).toEqual(cellVerticalPosition - borderOffsetInPixels);
hot.view.wt.wtOverlays.topOverlay.scrollTo(0);
}, 100);
setTimeout(function () {
cellVerticalPosition = hot.getCell(5, 5).offsetTop;
topBorder = $('.wtBorder.current')[0];
expect(topBorder.offsetTop).toEqual(cellVerticalPosition - borderOffsetInPixels);
done();
}, 200);
});
it('should redraw selection on `leftOverlay` when options `colHeaders` and `fixedColumnsLeft` are set, and user scrolled', function (done) {
var hot = handsontable({
fixedColumnsLeft: 2,
startRows: 20,
startCols: 20,
colHeaders: true,
rowHeaders: true,
width: 400,
height: 200
});
var cellVerticalPosition;
var borderOffsetInPixels = 1;
var topBorder;
selectCell(1, 0);
hot.view.wt.wtOverlays.topOverlay.scrollTo(5);
setTimeout(function () {
cellVerticalPosition = hot.getCell(1, 0).offsetTop;
topBorder = $('.wtBorder.current')[0];
expect(topBorder.offsetTop).toEqual(cellVerticalPosition - borderOffsetInPixels);
hot.view.wt.wtOverlays.topOverlay.scrollTo(0);
}, 100);
setTimeout(function () {
cellVerticalPosition = hot.getCell(1, 0).offsetTop;
topBorder = $('.wtBorder.current')[0];
expect(topBorder.offsetTop).toEqual(cellVerticalPosition - borderOffsetInPixels);
done();
}, 200);
});
describe('multiple selection mode', () => {
it('should select cells by using two layers when CTRL key is pressed (default mode of the selectionMode option)', () => {
handsontable({
startRows: 8,
startCols: 10
});
$(getCell(1, 1)).simulate('mousedown');
$(getCell(4, 4)).simulate('mouseover');
$(getCell(4, 4)).simulate('mouseup');
expect(getSelected()).toEqual([[1, 1, 4, 4]]);
keyDown('ctrl');
$(getCell(3, 3)).simulate('mousedown');
$(getCell(5, 6)).simulate('mouseover');
$(getCell(5, 6)).simulate('mouseup');
expect(getSelected()).toEqual([[1, 1, 4, 4], [3, 3, 5, 6]]);
});
it('should be disallowed to select non-consecutive cells when selectionMode is set as `single`', () => {
handsontable({
startRows: 8,
startCols: 10,
selectionMode: 'single',
});
$(getCell(1, 1)).simulate('mousedown');
$(getCell(4, 4)).simulate('mouseover');
$(getCell(4, 4)).simulate('mouseup');
expect(getSelected()).toEqual([[1, 1, 1, 1]]);
keyDown('ctrl');
$(getCell(3, 3)).simulate('mousedown');
$(getCell(5, 6)).simulate('mouseover');
$(getCell(5, 6)).simulate('mouseup');
expect(getSelected()).toEqual([[3, 3, 3, 3]]);
});
it('should be allowed to select consecutive cells when selectionMode is set as `range`', () => {
handsontable({
startRows: 8,
startCols: 10,
selectionMode: 'range',
});
$(getCell(1, 1)).simulate('mousedown');
$(getCell(4, 4)).simulate('mouseover');
$(getCell(4, 4)).simulate('mouseup');
expect(getSelected()).toEqual([[1, 1, 4, 4]]);
$(getCell(3, 3)).simulate('mousedown');
$(getCell(5, 6)).simulate('mouseover');
$(getCell(5, 6)).simulate('mouseup');
expect(getSelected()).toEqual([[3, 3, 5, 6]]);
});
it('should be disallowed to select non-consecutive cells when selectionMode is set as `range`', () => {
handsontable({
startRows: 8,
startCols: 10,
selectionMode: 'range',
});
$(getCell(1, 1)).simulate('mousedown');
$(getCell(4, 4)).simulate('mouseover');
$(getCell(4, 4)).simulate('mouseup');
expect(getSelected()).toEqual([[1, 1, 4, 4]]);
keyDown('ctrl');
$(getCell(3, 3)).simulate('mousedown');
$(getCell(5, 6)).simulate('mouseover');
$(getCell(5, 6)).simulate('mouseup');
expect(getSelected()).toEqual([[3, 3, 5, 6]]);
});
it('should properly colorize selection layers including layer intersections', () => {
handsontable({
startRows: 21,
startCols: 30,
selectionMode: 'multiple',
colHeaders: true,
rowHeaders: true,
});
$(getCell(0, 0)).simulate('mousedown');
$(getCell(20, 15)).simulate('mouseover');
$(getCell(20, 15)).simulate('mouseup');
keyDown('ctrl');
$(getCell(1, 1)).simulate('mousedown');
$(getCell(19, 16)).simulate('mouseover');
$(getCell(19, 16)).simulate('mouseup');
$(getCell(2, 2)).simulate('mousedown');
$(getCell(18, 17)).simulate('mouseover');
$(getCell(18, 17)).simulate('mouseup');
$(getCell(3, 3)).simulate('mousedown');
$(getCell(17, 18)).simulate('mouseover');
$(getCell(17, 18)).simulate('mouseup');
$(getCell(4, 4)).simulate('mousedown');
$(getCell(16, 19)).simulate('mouseover');
$(getCell(16, 19)).simulate('mouseup');
$(getCell(5, 5)).simulate('mousedown');
$(getCell(15, 20)).simulate('mouseover');
$(getCell(15, 20)).simulate('mouseup');
$(getCell(6, 6)).simulate('mousedown');
$(getCell(14, 21)).simulate('mouseover');
$(getCell(14, 21)).simulate('mouseup');
$(getCell(7, 7)).simulate('mousedown');
$(getCell(13, 22)).simulate('mouseover');
$(getCell(13, 22)).simulate('mouseup');
$(getCell(8, 8)).simulate('mousedown');
$(getCell(12, 23)).simulate('mouseover');
$(getCell(12, 23)).simulate('mouseup');
$(getCell(9, 9)).simulate('mousedown');
$(getCell(11, 24)).simulate('mouseover');
$(getCell(11, 24)).simulate('mouseup');
$(getCell(10, 10)).simulate('mousedown');
$(getCell(10, 25)).simulate('mouseover');
$(getCell(10, 25)).simulate('mouseup');
expect(`
| ║ - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : - : : : : |
|===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===:===|
| - ║ 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : : : : : : : : : : : : : : |
| - ║ 0 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 0 : : : : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 1 : 0 : : : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 2 : 1 : 0 : : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 3 : 2 : 1 : 0 : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : H : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 7 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 6 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 5 : 4 : 3 : 2 : 1 : 0 : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 4 : 3 : 2 : 1 : 0 : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 3 : 2 : 1 : 0 : : : : : : : : : : : |
| - ║ 0 : 1 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 2 : 1 : 0 : : : : : : : : : : : : |
| - ║ 0 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 1 : 0 : : : : : : : : : : : : : |
| - ║ 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 : : : : : : : : : : : : : : |
`).toBeMatchToSelectionPattern();
});
it('should call afterSelection and afterSelectionEnd hooks with proper arguments', () => {
const hooks = jasmine.createSpyObj('hooks', ['afterSelection', 'afterSelectionEnd']);
handsontable({
startRows: 21,
startCols: 30,
selectionMode: 'multiple',
afterSelection: hooks.afterSelection,
afterSelectionEnd: hooks.afterSelectionEnd,
});
$(getCell(0, 0)).simulate('mousedown');
$(getCell(20, 15)).simulate('mouseover');
$(getCell(20, 15)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([0, 0, 0, 0, jasmine.any(Object), 0]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([0, 0, 20, 15, jasmine.any(Object), 0]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([0, 0, 20, 15, 0, void 0]);
keyDown('ctrl');
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(1, 1)).simulate('mousedown');
$(getCell(19, 16)).simulate('mouseover');
$(getCell(19, 16)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([1, 1, 1, 1, jasmine.any(Object), 1]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([1, 1, 19, 16, jasmine.any(Object), 1]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([1, 1, 19, 16, 1, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(2, 2)).simulate('mousedown');
$(getCell(18, 17)).simulate('mouseover');
$(getCell(18, 17)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([2, 2, 2, 2, jasmine.any(Object), 2]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([2, 2, 18, 17, jasmine.any(Object), 2]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([2, 2, 18, 17, 2, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(3, 3)).simulate('mousedown');
$(getCell(17, 18)).simulate('mouseover');
$(getCell(17, 18)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([3, 3, 3, 3, jasmine.any(Object), 3]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([3, 3, 17, 18, jasmine.any(Object), 3]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([3, 3, 17, 18, 3, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(4, 4)).simulate('mousedown');
$(getCell(16, 19)).simulate('mouseover');
$(getCell(16, 19)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([4, 4, 4, 4, jasmine.any(Object), 4]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([4, 4, 16, 19, jasmine.any(Object), 4]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([4, 4, 16, 19, 4, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(5, 5)).simulate('mousedown');
$(getCell(15, 20)).simulate('mouseover');
$(getCell(15, 20)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([5, 5, 5, 5, jasmine.any(Object), 5]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([5, 5, 15, 20, jasmine.any(Object), 5]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([5, 5, 15, 20, 5, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(6, 6)).simulate('mousedown');
$(getCell(14, 21)).simulate('mouseover');
$(getCell(14, 21)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([6, 6, 6, 6, jasmine.any(Object), 6]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([6, 6, 14, 21, jasmine.any(Object), 6]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([6, 6, 14, 21, 6, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(7, 7)).simulate('mousedown');
$(getCell(13, 22)).simulate('mouseover');
$(getCell(13, 22)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([7, 7, 7, 7, jasmine.any(Object), 7]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([7, 7, 13, 22, jasmine.any(Object), 7]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([7, 7, 13, 22, 7, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(8, 8)).simulate('mousedown');
$(getCell(12, 23)).simulate('mouseover');
$(getCell(12, 23)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([8, 8, 8, 8, jasmine.any(Object), 8]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([8, 8, 12, 23, jasmine.any(Object), 8]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([8, 8, 12, 23, 8, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(9, 9)).simulate('mousedown');
$(getCell(11, 24)).simulate('mouseover');
$(getCell(11, 24)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([9, 9, 9, 9, jasmine.any(Object), 9]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([9, 9, 11, 24, jasmine.any(Object), 9]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([9, 9, 11, 24, 9, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(10, 10)).simulate('mousedown');
$(getCell(10, 25)).simulate('mouseover');
$(getCell(10, 25)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([10, 10, 10, 10, jasmine.any(Object), 10]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([10, 10, 10, 25, jasmine.any(Object), 10]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([10, 10, 10, 25, 10, void 0]);
});
it('should call afterSelectionByProp and afterSelectionEndByProp hooks with proper arguments', () => {
const hooks = jasmine.createSpyObj('hooks', ['afterSelection', 'afterSelectionEnd']);
handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(21, 30),
selectionMode: 'multiple',
afterSelectionByProp: hooks.afterSelection,
afterSelectionEndByProp: hooks.afterSelectionEnd,
});
$(getCell(0, 0)).simulate('mousedown');
$(getCell(20, 15)).simulate('mouseover');
$(getCell(20, 15)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([0, 'prop0', 0, 'prop0', jasmine.any(Object), 0]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([0, 'prop0', 20, 'prop15', jasmine.any(Object), 0]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([0, 'prop0', 20, 'prop15', 0, void 0]);
keyDown('ctrl');
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(1, 1)).simulate('mousedown');
$(getCell(19, 16)).simulate('mouseover');
$(getCell(19, 16)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([1, 'prop1', 1, 'prop1', jasmine.any(Object), 1]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([1, 'prop1', 19, 'prop16', jasmine.any(Object), 1]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([1, 'prop1', 19, 'prop16', 1, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(2, 2)).simulate('mousedown');
$(getCell(18, 17)).simulate('mouseover');
$(getCell(18, 17)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([2, 'prop2', 2, 'prop2', jasmine.any(Object), 2]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([2, 'prop2', 18, 'prop17', jasmine.any(Object), 2]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([2, 'prop2', 18, 'prop17', 2, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(3, 3)).simulate('mousedown');
$(getCell(17, 18)).simulate('mouseover');
$(getCell(17, 18)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([3, 'prop3', 3, 'prop3', jasmine.any(Object), 3]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([3, 'prop3', 17, 'prop18', jasmine.any(Object), 3]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([3, 'prop3', 17, 'prop18', 3, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(4, 4)).simulate('mousedown');
$(getCell(16, 19)).simulate('mouseover');
$(getCell(16, 19)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([4, 'prop4', 4, 'prop4', jasmine.any(Object), 4]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([4, 'prop4', 16, 'prop19', jasmine.any(Object), 4]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([4, 'prop4', 16, 'prop19', 4, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(5, 5)).simulate('mousedown');
$(getCell(15, 20)).simulate('mouseover');
$(getCell(15, 20)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([5, 'prop5', 5, 'prop5', jasmine.any(Object), 5]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([5, 'prop5', 15, 'prop20', jasmine.any(Object), 5]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([5, 'prop5', 15, 'prop20', 5, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(6, 6)).simulate('mousedown');
$(getCell(14, 21)).simulate('mouseover');
$(getCell(14, 21)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([6, 'prop6', 6, 'prop6', jasmine.any(Object), 6]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([6, 'prop6', 14, 'prop21', jasmine.any(Object), 6]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([6, 'prop6', 14, 'prop21', 6, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(7, 7)).simulate('mousedown');
$(getCell(13, 22)).simulate('mouseover');
$(getCell(13, 22)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([7, 'prop7', 7, 'prop7', jasmine.any(Object), 7]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([7, 'prop7', 13, 'prop22', jasmine.any(Object), 7]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([7, 'prop7', 13, 'prop22', 7, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(8, 8)).simulate('mousedown');
$(getCell(12, 23)).simulate('mouseover');
$(getCell(12, 23)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([8, 'prop8', 8, 'prop8', jasmine.any(Object), 8]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([8, 'prop8', 12, 'prop23', jasmine.any(Object), 8]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([8, 'prop8', 12, 'prop23', 8, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(9, 9)).simulate('mousedown');
$(getCell(11, 24)).simulate('mouseover');
$(getCell(11, 24)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([9, 'prop9', 9, 'prop9', jasmine.any(Object), 9]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([9, 'prop9', 11, 'prop24', jasmine.any(Object), 9]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([9, 'prop9', 11, 'prop24', 9, void 0]);
hooks.afterSelection.calls.reset();
hooks.afterSelectionEnd.calls.reset();
$(getCell(10, 10)).simulate('mousedown');
$(getCell(10, 25)).simulate('mouseover');
$(getCell(10, 25)).simulate('mouseup');
expect(hooks.afterSelection.calls.count()).toBe(2);
expect(hooks.afterSelection.calls.argsFor(0)).toEqual([10, 'prop10', 10, 'prop10', jasmine.any(Object), 10]);
expect(hooks.afterSelection.calls.argsFor(1)).toEqual([10, 'prop10', 10, 'prop25', jasmine.any(Object), 10]);
expect(hooks.afterSelectionEnd.calls.count()).toBe(1);
expect(hooks.afterSelectionEnd.calls.argsFor(0)).toEqual([10, 'prop10', 10, 'prop25', 10, void 0]);
});
});
});
 | 1 | 14,824 | These tests check that selection coordinates don't return negative values in those edge cases. Please revert the changes and set `autoWrapCol` and `autoWrapRow` to `false` on the Handsontable instance. This change applies to the entire Core_selection.spec.js file. | handsontable-handsontable | js |
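A minimal sketch of what the reviewer asks for, using the spec helpers already present in this file (`handsontable`, `selectCell`, `keyDownUp`, `getSelected`); the test name and exact assertions here are hypothetical, not part of the actual change:

  // Hypothetical spec illustrating the reviewer's request: with wrapping
  // disabled, arrow navigation at the grid edge must stay in place rather
  // than wrap around (and must never yield negative coordinates).
  it('should keep the selection in bounds when auto-wrapping is disabled', () => {
    handsontable({
      startRows: 5,
      startCols: 5,
      autoWrapRow: false, // reviewer-requested setting
      autoWrapCol: false  // reviewer-requested setting
    });
    selectCell(0, 0);
    keyDownUp('arrow_up');   // would wrap to the last row if autoWrapRow were true
    keyDownUp('arrow_left'); // would wrap to the last column if autoWrapCol were true
    expect(getSelected()).toEqual([[0, 0, 0, 0]]);
  });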
@@ -60,7 +60,17 @@ namespace Nethermind.Evm.TransactionProcessing
/// <summary>
/// Commit and later restore state, use for CallAndRestore
/// </summary>
- CommitAndRestore = Commit | Restore
+ CommitAndRestore = Commit | Restore,
+
+ /// <summary>
+ /// Zero Gas price
+ /// </summary>
+ ZeroGasPrice = 4,
+
+ /// <summary>
+ /// Commit and restore with zero gas price
+ /// </summary>
+ CommitAndRestoreWithZeroGasPrice = CommitAndRestore | ZeroGasPrice
}
public TransactionProcessor( | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.IO;
using System.Linq;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Specs;
using Nethermind.Crypto;
using Nethermind.Evm.Tracing;
using Nethermind.Int256;
using Nethermind.Logging;
using Nethermind.Specs;
using Nethermind.State;
using Transaction = Nethermind.Core.Transaction;
namespace Nethermind.Evm.TransactionProcessing
{
public class TransactionProcessor : ITransactionProcessor
{
private readonly EthereumEcdsa _ecdsa;
private readonly ILogger _logger;
private readonly IStateProvider _stateProvider;
private readonly IStorageProvider _storageProvider;
private readonly ISpecProvider _specProvider;
private readonly IVirtualMachine _virtualMachine;
[Flags]
private enum ExecutionOptions
{
/// <summary>
/// Just accumulate the state
/// </summary>
None = 0,
/// <summary>
/// Commit the state after execution
/// </summary>
Commit = 1,
/// <summary>
/// Restore state after execution
/// </summary>
Restore = 2,
/// <summary>
/// Commit and later restore state, use for CallAndRestore
/// </summary>
CommitAndRestore = Commit | Restore
}
public TransactionProcessor(
ISpecProvider? specProvider,
IStateProvider? stateProvider,
IStorageProvider? storageProvider,
IVirtualMachine? virtualMachine,
ILogManager? logManager)
{
_logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
_specProvider = specProvider ?? throw new ArgumentNullException(nameof(specProvider));
_virtualMachine = virtualMachine ?? throw new ArgumentNullException(nameof(virtualMachine));
_stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider));
_storageProvider = storageProvider ?? throw new ArgumentNullException(nameof(storageProvider));
_ecdsa = new EthereumEcdsa(specProvider.ChainId, logManager);
}
public void CallAndRestore(Transaction transaction, BlockHeader block, ITxTracer txTracer)
{
Execute(transaction, block, txTracer, ExecutionOptions.CommitAndRestore);
}
public void BuildUp(Transaction transaction, BlockHeader block, ITxTracer txTracer)
{
// we need to treat the result of previous transaction as the original value of next transaction
// when we do not commit
_storageProvider.TakeSnapshot(true);
Execute(transaction, block, txTracer, ExecutionOptions.None);
}
public void Execute(Transaction transaction, BlockHeader block, ITxTracer txTracer)
{
Execute(transaction, block, txTracer, ExecutionOptions.Commit);
}
private void QuickFail(Transaction tx, BlockHeader block, ITxTracer txTracer, bool eip658NotEnabled, string? reason)
{
block.GasUsed += tx.GasLimit;
Address recipient = tx.To ?? ContractAddress.From(
tx.SenderAddress ?? Address.Zero,
_stateProvider.GetNonce(tx.SenderAddress ?? Address.Zero));
if (txTracer.IsTracingReceipt)
{
Keccak? stateRoot = null;
if (eip658NotEnabled)
{
_stateProvider.RecalculateStateRoot();
stateRoot = _stateProvider.StateRoot;
}
txTracer.MarkAsFailed(recipient, tx.GasLimit, Array.Empty<byte>(), reason ?? "invalid", stateRoot);
}
}
private void Execute(Transaction transaction, BlockHeader block, ITxTracer txTracer, ExecutionOptions executionOptions)
{
bool eip658NotEnabled = !_specProvider.GetSpec(block.Number).IsEip658Enabled;
            // restore - used by CallAndRestore; we will restore the state after the execution
            bool restore = (executionOptions & ExecutionOptions.Restore) != ExecutionOptions.None;
            // commit - used by the standard Execute; we will commit the state after execution
            bool commit = (executionOptions & ExecutionOptions.Commit) != ExecutionOptions.None || eip658NotEnabled;
            // !commit - used for build-up during block production; we won't commit state after each
            // transaction (to support rollbacks) and commit only once the whole block is constructed
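            // Worked example of the flag checks above: for CallAndRestore the options are
            // CommitAndRestore (= Commit | Restore), so both masks are non-zero and
            // restore == true and commit == true; for block build-up (None) both masks are
            // zero, although commit may still be forced on pre-EIP-658 networks.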
bool notSystemTransaction = !transaction.IsSystem();
bool deleteCallerAccount = false;
IReleaseSpec spec = _specProvider.GetSpec(block.Number);
if (!notSystemTransaction)
{
spec = new SystemTransactionReleaseSpec(spec);
}
UInt256 value = transaction.Value;
if (!transaction.TryCalculatePremiumPerGas(block.BaseFeePerGas, out UInt256 premiumPerGas) && !restore)
{
TraceLogInvalidTx(transaction, "MINER_PREMIUM_IS_NEGATIVE");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "miner premium is negative");
return;
}
UInt256 gasPrice = transaction.CalculateEffectiveGasPrice(spec.IsEip1559Enabled, block.BaseFeePerGas);
long gasLimit = transaction.GasLimit;
byte[] machineCode = transaction.IsContractCreation ? transaction.Data : null;
byte[] data = transaction.IsMessageCall ? transaction.Data : Array.Empty<byte>();
Address? caller = transaction.SenderAddress;
if (_logger.IsTrace) _logger.Trace($"Executing tx {transaction.Hash}");
if (caller is null)
{
TraceLogInvalidTx(transaction, "SENDER_NOT_SPECIFIED");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "sender not specified");
return;
}
long intrinsicGas = IntrinsicGasCalculator.Calculate(transaction, spec);
if (_logger.IsTrace) _logger.Trace($"Intrinsic gas calculated for {transaction.Hash}: " + intrinsicGas);
if (notSystemTransaction)
{
if (gasLimit < intrinsicGas)
{
TraceLogInvalidTx(transaction, $"GAS_LIMIT_BELOW_INTRINSIC_GAS {gasLimit} < {intrinsicGas}");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "gas limit below intrinsic gas");
return;
}
if (!restore && gasLimit > block.GasLimit - block.GasUsed)
{
TraceLogInvalidTx(transaction,
$"BLOCK_GAS_LIMIT_EXCEEDED {gasLimit} > {block.GasLimit} - {block.GasUsed}");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "block gas limit exceeded");
return;
}
}
if (!_stateProvider.AccountExists(caller))
{
// hacky fix for the potential recovery issue
if (transaction.Signature != null)
{
transaction.SenderAddress = _ecdsa.RecoverAddress(transaction, !spec.ValidateChainId);
}
if (caller != transaction.SenderAddress)
{
                    if (_logger.IsWarn) _logger.Warn($"TX recovery issue fixed - tx was coming with sender {caller} and now it recovers to {transaction.SenderAddress}");
caller = transaction.SenderAddress;
}
else
{
TraceLogInvalidTx(transaction, $"SENDER_ACCOUNT_DOES_NOT_EXIST {caller}");
if (!commit || restore || gasPrice == UInt256.Zero)
{
deleteCallerAccount = !commit || restore;
_stateProvider.CreateAccount(caller, UInt256.Zero);
}
}
if (caller is null)
{
throw new InvalidDataException(
$"Failed to recover sender address on tx {transaction.Hash} when previously recovered sender account did not exist.");
}
}
UInt256 senderReservedGasPayment = restore ? UInt256.Zero : (ulong) gasLimit * gasPrice;
if (notSystemTransaction)
{
UInt256 senderBalance = _stateProvider.GetBalance(caller);
if (!restore && ((ulong) intrinsicGas * gasPrice + value > senderBalance || senderReservedGasPayment + value > senderBalance))
{
TraceLogInvalidTx(transaction, $"INSUFFICIENT_SENDER_BALANCE: ({caller})_BALANCE = {senderBalance}");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "insufficient sender balance");
return;
}
if (!restore && spec.IsEip1559Enabled && !transaction.IsServiceTransaction && senderBalance < (UInt256)transaction.GasLimit * transaction.MaxFeePerGas + value)
{
TraceLogInvalidTx(transaction, $"INSUFFICIENT_MAX_FEE_PER_GAS_FOR_SENDER_BALANCE: ({caller})_BALANCE = {senderBalance}, MAX_FEE_PER_GAS: {transaction.MaxFeePerGas}");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "insufficient MaxFeePerGas for sender balance");
return;
}
if (transaction.Nonce != _stateProvider.GetNonce(caller))
{
TraceLogInvalidTx(transaction, $"WRONG_TRANSACTION_NONCE: {transaction.Nonce} (expected {_stateProvider.GetNonce(caller)})");
QuickFail(transaction, block, txTracer, eip658NotEnabled, "wrong transaction nonce");
return;
}
_stateProvider.IncrementNonce(caller);
}
_stateProvider.SubtractFromBalance(caller, senderReservedGasPayment, spec);
if (commit)
{
_stateProvider.Commit(spec, txTracer.IsTracingState ? txTracer : NullTxTracer.Instance);
}
long unspentGas = gasLimit - intrinsicGas;
long spentGas = gasLimit;
int stateSnapshot = _stateProvider.TakeSnapshot();
int storageSnapshot = _storageProvider.TakeSnapshot();
_stateProvider.SubtractFromBalance(caller, value, spec);
byte statusCode = StatusCode.Failure;
TransactionSubstate substate = null;
Address? recipientOrNull = null;
try
{
Address recipient = transaction.GetRecipient(transaction.IsContractCreation ? _stateProvider.GetNonce(caller) : 0);
if (transaction.IsContractCreation)
{
if (_stateProvider.AccountExists(recipient))
{
if (_virtualMachine.GetCachedCodeInfo(recipient, spec).MachineCode.Length != 0 || _stateProvider.GetNonce(recipient) != 0)
{
if (_logger.IsTrace)
{
_logger.Trace($"Contract collision at {recipient}"); // the account already owns the contract with the code
}
throw new TransactionCollisionException();
}
_stateProvider.UpdateStorageRoot(recipient, Keccak.EmptyTreeHash);
}
}
if (recipient == null)
{
throw new InvalidDataException("Recipient has not been resolved properly before tx execution");
}
recipientOrNull = recipient;
ExecutionEnvironment env = new();
env.TxExecutionContext = new TxExecutionContext(block, caller, gasPrice);
env.Value = value;
env.TransferValue = value;
env.Caller = caller;
env.CodeSource = recipient;
env.ExecutingAccount = recipient;
env.InputData = data ?? Array.Empty<byte>();
env.CodeInfo = machineCode == null ? _virtualMachine.GetCachedCodeInfo(recipient, spec) : new CodeInfo(machineCode);
ExecutionType executionType = transaction.IsContractCreation ? ExecutionType.Create : ExecutionType.Call;
using (EvmState state = new(unspentGas, env, executionType, true, stateSnapshot, storageSnapshot, false))
{
if (spec.UseTxAccessLists)
{
state.WarmUp(transaction.AccessList); // eip-2930
}
if (spec.UseHotAndColdStorage)
{
state.WarmUp(caller); // eip-2929
state.WarmUp(recipient); // eip-2929
}
substate = _virtualMachine.Run(state, txTracer);
unspentGas = state.GasAvailable;
if (txTracer.IsTracingAccess)
{
txTracer.ReportAccess(state.AccessedAddresses, state.AccessedStorageCells);
}
}
if (substate.ShouldRevert || substate.IsError)
{
if (_logger.IsTrace) _logger.Trace("Restoring state from before transaction");
_stateProvider.Restore(stateSnapshot);
_storageProvider.Restore(storageSnapshot);
}
else
{
                    // tks: there is similar code for contract creation from init and from CREATE
// this may lead to inconsistencies (however it is tested extensively in blockchain tests)
if (transaction.IsContractCreation)
{
long codeDepositGasCost = CodeDepositHandler.CalculateCost(substate.Output.Length, spec);
if (unspentGas < codeDepositGasCost && spec.ChargeForTopLevelCreate)
{
throw new OutOfGasException();
}
if (CodeDepositHandler.CodeIsInvalid(spec, substate.Output))
{
throw new InvalidCodeException();
}
if (unspentGas >= codeDepositGasCost)
{
Keccak codeHash = _stateProvider.UpdateCode(substate.Output);
_stateProvider.UpdateCodeHash(recipient, codeHash, spec);
unspentGas -= codeDepositGasCost;
}
}
foreach (Address toBeDestroyed in substate.DestroyList)
{
if (_logger.IsTrace) _logger.Trace($"Destroying account {toBeDestroyed}");
_storageProvider.ClearStorage(toBeDestroyed);
_stateProvider.DeleteAccount(toBeDestroyed);
if (txTracer.IsTracingRefunds) txTracer.ReportRefund(RefundOf.Destroy(spec.IsEip3529Enabled));
}
statusCode = StatusCode.Success;
}
spentGas = Refund(gasLimit, unspentGas, substate, caller, gasPrice, spec);
}
catch (Exception ex) when (ex is EvmException || ex is OverflowException) // TODO: OverflowException? still needed? hope not
{
if (_logger.IsTrace) _logger.Trace($"EVM EXCEPTION: {ex.GetType().Name}");
_stateProvider.Restore(stateSnapshot);
_storageProvider.Restore(storageSnapshot);
}
if (_logger.IsTrace) _logger.Trace("Gas spent: " + spentGas);
Address gasBeneficiary = block.GasBeneficiary;
if (statusCode == StatusCode.Failure || !(substate?.DestroyList.Contains(gasBeneficiary) ?? false))
{
if (notSystemTransaction)
{
if (!_stateProvider.AccountExists(gasBeneficiary))
{
_stateProvider.CreateAccount(gasBeneficiary, (ulong) spentGas * premiumPerGas);
}
else
{
_stateProvider.AddToBalance(gasBeneficiary, (ulong) spentGas * premiumPerGas, spec);
}
}
}
if (restore)
{
_storageProvider.Reset();
_stateProvider.Reset();
if (deleteCallerAccount)
{
_stateProvider.DeleteAccount(caller);
}
else
{
_stateProvider.AddToBalance(caller, senderReservedGasPayment, spec);
if (notSystemTransaction)
{
_stateProvider.DecrementNonce(caller);
}
_stateProvider.Commit(spec);
}
}
else if (commit)
{
_storageProvider.Commit(txTracer.IsTracingState ? txTracer : NullStorageTracer.Instance);
_stateProvider.Commit(spec, txTracer.IsTracingState ? txTracer : NullStateTracer.Instance);
}
if (!restore && notSystemTransaction)
{
block.GasUsed += spentGas;
}
if (txTracer.IsTracingReceipt)
{
Keccak stateRoot = null;
if (eip658NotEnabled)
{
_stateProvider.RecalculateStateRoot();
stateRoot = _stateProvider.StateRoot;
}
if (statusCode == StatusCode.Failure)
{
txTracer.MarkAsFailed(recipientOrNull, spentGas, (substate?.ShouldRevert ?? false) ? substate.Output.ToArray() : Array.Empty<byte>(), substate?.Error, stateRoot);
}
else
{
txTracer.MarkAsSuccess(recipientOrNull, spentGas, substate.Output.ToArray(), substate.Logs.Any() ? substate.Logs.ToArray() : Array.Empty<LogEntry>(), stateRoot);
}
}
}
private void TraceLogInvalidTx(Transaction transaction, string reason)
{
if (_logger.IsTrace) _logger.Trace($"Invalid tx {transaction.Hash} ({reason})");
}
private long Refund(long gasLimit, long unspentGas, TransactionSubstate substate, Address sender, UInt256 gasPrice, IReleaseSpec spec)
{
long spentGas = gasLimit;
if (!substate.IsError)
{
spentGas -= unspentGas;
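                // Note: the claimable refund below is capped relative to the gas actually
                // spent (historically spentGas / 2, tightened to spentGas / 5 by EIP-3529),
                // assuming RefundHelper.CalculateClaimableRefund applies the standard cap.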
long refund = substate.ShouldRevert ? 0 : RefundHelper.CalculateClaimableRefund(spentGas, substate.Refund + substate.DestroyList.Count * RefundOf.Destroy(spec.IsEip3529Enabled), spec);
if (_logger.IsTrace) _logger.Trace("Refunding unused gas of " + unspentGas + " and refund of " + refund);
_stateProvider.AddToBalance(sender, (ulong) (unspentGas + refund) * gasPrice, spec);
spentGas -= refund;
}
return spentGas;
}
}
}
| 1 | 26,074 | I would prefer flag SkipGasPricingValidation | NethermindEth-nethermind | .cs |
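A minimal sketch of the naming the reviewer prefers, assuming the same bit layout and semantics as the `ZeroGasPrice` member added in the patch above; the rename is hypothetical, not part of the actual change:

        [Flags]
        private enum ExecutionOptions
        {
            None = 0,
            Commit = 1,
            Restore = 2,
            CommitAndRestore = Commit | Restore,

            /// <summary>
            /// Reviewer-preferred name for the new bit; semantics unchanged
            /// </summary>
            SkipGasPricingValidation = 4,

            /// <summary>
            /// Commit and restore while skipping gas pricing validation
            /// </summary>
            CommitAndRestoreWithSkipGasPricingValidation = CommitAndRestore | SkipGasPricingValidation
        }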
@@ -1492,6 +1492,13 @@ void nano::work_watcher::add (std::shared_ptr<nano::block> block_a)
}
}
+bool nano::work_watcher::is_watched (nano::qualified_root const root_a)
+{
+ std::unique_lock<std::mutex> lock (mutex);
+ auto exists (blocks.find (root_a));
+ return exists != blocks.end ();
+}
+
void nano::wallets::do_wallet_actions ()
{
std::unique_lock<std::mutex> action_lock (action_mutex); | 1 | #include <nano/node/wallet.hpp>
#include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/utility.hpp>
#include <nano/node/node.hpp>
#include <nano/node/wallet.hpp>
#include <nano/node/xorshift.hpp>
#include <argon2.h>
#include <boost/filesystem.hpp>
#include <boost/polymorphic_cast.hpp>
#include <future>
nano::uint256_union nano::wallet_store::check (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::check_special));
return value.key;
}
nano::uint256_union nano::wallet_store::salt (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::salt_special));
return value.key;
}
void nano::wallet_store::wallet_key (nano::raw_key & prv_a, nano::transaction const & transaction_a)
{
std::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key wallet_l;
wallet_key_mem.value (wallet_l);
nano::raw_key password_l;
password.value (password_l);
prv_a.decrypt (wallet_l.data, password_l, salt (transaction_a).owords[0]);
}
void nano::wallet_store::seed (nano::raw_key & prv_a, nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::seed_special));
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
prv_a.decrypt (value.key, password_l, salt (transaction_a).owords[seed_iv_index]);
}
void nano::wallet_store::seed_set (nano::transaction const & transaction_a, nano::raw_key const & prv_a)
{
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
nano::uint256_union ciphertext;
ciphertext.encrypt (prv_a, password_l, salt (transaction_a).owords[seed_iv_index]);
entry_put_raw (transaction_a, nano::wallet_store::seed_special, nano::wallet_value (ciphertext, 0));
deterministic_clear (transaction_a);
}
nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a)
{
auto index (deterministic_index_get (transaction_a));
nano::raw_key prv;
deterministic_key (prv, transaction_a, index);
nano::public_key result (nano::pub_key (prv.data));
while (exists (transaction_a, result))
{
++index;
deterministic_key (prv, transaction_a, index);
result = nano::pub_key (prv.data);
}
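	// Pack a marker value: the high 32 bits flag the key as deterministic,
	// the low 32 bits store the derivation index (consumed by key_type and
	// deterministic_clear below).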
uint64_t marker (1);
marker <<= 32;
marker |= index;
entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0));
++index;
deterministic_index_set (transaction_a, index);
return result;
}
nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a, uint32_t const index)
{
nano::raw_key prv;
deterministic_key (prv, transaction_a, index);
nano::public_key result (nano::pub_key (prv.data));
uint64_t marker (1);
marker <<= 32;
marker |= index;
entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0));
return result;
}
void nano::wallet_store::deterministic_key (nano::raw_key & prv_a, nano::transaction const & transaction_a, uint32_t index_a)
{
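	// Derive the account private key from the wallet seed and a 32-bit index;
	// nano::deterministic_key hashes seed || index (blake2b-based in the nano codebase).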
assert (valid_password (transaction_a));
nano::raw_key seed_l;
seed (seed_l, transaction_a);
nano::deterministic_key (seed_l.data, index_a, prv_a.data);
}
uint32_t nano::wallet_store::deterministic_index_get (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::deterministic_index_special));
return static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1));
}
void nano::wallet_store::deterministic_index_set (nano::transaction const & transaction_a, uint32_t index_a)
{
nano::uint256_union index_l (index_a);
nano::wallet_value value (index_l, 0);
entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, value);
}
void nano::wallet_store::deterministic_clear (nano::transaction const & transaction_a)
{
nano::uint256_union key (0);
for (auto i (begin (transaction_a)), n (end ()); i != n;)
{
switch (key_type (nano::wallet_value (i->second)))
{
case nano::key_type::deterministic:
{
nano::uint256_union key (i->first);
erase (transaction_a, key);
i = begin (transaction_a, key);
break;
}
default:
{
++i;
break;
}
}
}
deterministic_index_set (transaction_a, 0);
}
bool nano::wallet_store::valid_password (nano::transaction const & transaction_a)
{
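	// Encrypt an all-zero block with the wallet key (itself decrypted using the
	// current password) and compare against the stored check value; equality
	// means the supplied password is correct.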
nano::raw_key zero;
zero.data.clear ();
nano::raw_key wallet_key_l;
wallet_key (wallet_key_l, transaction_a);
nano::uint256_union check_l;
check_l.encrypt (zero, wallet_key_l, salt (transaction_a).owords[check_iv_index]);
bool ok = check (transaction_a) == check_l;
return ok;
}
bool nano::wallet_store::attempt_password (nano::transaction const & transaction_a, std::string const & password_a)
{
bool result = false;
{
std::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key password_l;
derive_key (password_l, transaction_a, password_a);
password.value_set (password_l);
result = !valid_password (transaction_a);
}
if (!result)
{
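// Upgrade one version at a time; each case deliberately falls through to the next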
switch (version (transaction_a))
{
case version_1:
upgrade_v1_v2 (transaction_a);
case version_2:
upgrade_v2_v3 (transaction_a);
case version_3:
upgrade_v3_v4 (transaction_a);
case version_4:
break;
default:
assert (false);
}
}
return result;
}
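// Re-encrypt the wallet key under a key derived from the new password; returns true if the current password is invalid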
bool nano::wallet_store::rekey (nano::transaction const & transaction_a, std::string const & password_a)
{
std::lock_guard<std::recursive_mutex> lock (mutex);
bool result (false);
if (valid_password (transaction_a))
{
nano::raw_key password_new;
derive_key (password_new, transaction_a, password_a);
nano::raw_key wallet_key_l;
wallet_key (wallet_key_l, transaction_a);
nano::raw_key password_l;
password.value (password_l);
password.value_set (password_new);
nano::uint256_union encrypted;
encrypted.encrypt (wallet_key_l, password_new, salt (transaction_a).owords[0]);
nano::raw_key wallet_enc;
wallet_enc.data = encrypted;
wallet_key_mem.value_set (wallet_enc);
entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0));
}
else
{
result = true;
}
return result;
}
void nano::wallet_store::derive_key (nano::raw_key & prv_a, nano::transaction const & transaction_a, std::string const & password_a)
{
auto salt_l (salt (transaction_a));
kdf.phs (prv_a, password_a, salt_l);
}
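// fan obfuscates a secret as XOR shares spread across several allocations so the raw key never sits contiguously in memory; value_get () recombines the shares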
nano::fan::fan (nano::uint256_union const & key, size_t count_a)
{
std::unique_ptr<nano::uint256_union> first (new nano::uint256_union (key));
for (size_t i (1); i < count_a; ++i)
{
std::unique_ptr<nano::uint256_union> entry (new nano::uint256_union);
nano::random_pool::generate_block (entry->bytes.data (), entry->bytes.size ());
*first ^= *entry;
values.push_back (std::move (entry));
}
values.push_back (std::move (first));
}
void nano::fan::value (nano::raw_key & prv_a)
{
std::lock_guard<std::mutex> lock (mutex);
value_get (prv_a);
}
void nano::fan::value_get (nano::raw_key & prv_a)
{
assert (!mutex.try_lock ());
prv_a.data.clear ();
for (auto & i : values)
{
prv_a.data ^= *i;
}
}
void nano::fan::value_set (nano::raw_key const & value_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::raw_key value_l;
value_get (value_l);
*(values[0]) ^= value_l.data;
*(values[0]) ^= value_a.data;
}
// Wallet version number
nano::uint256_union const nano::wallet_store::version_special (0);
// Random number used to salt private key encryption
nano::uint256_union const nano::wallet_store::salt_special (1);
// Key used to encrypt wallet keys, encrypted itself by the user password
nano::uint256_union const nano::wallet_store::wallet_key_special (2);
// Check value used to see if password is valid
nano::uint256_union const nano::wallet_store::check_special (3);
// Representative account to be used if we open a new account
nano::uint256_union const nano::wallet_store::representative_special (4);
// Wallet seed for deterministic key generation
nano::uint256_union const nano::wallet_store::seed_special (5);
// Current key index for deterministic keys
nano::uint256_union const nano::wallet_store::deterministic_index_special (6);
int const nano::wallet_store::special_count (7);
size_t const nano::wallet_store::check_iv_index (0);
size_t const nano::wallet_store::seed_iv_index (1);
nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a, std::string const & json_a) :
password (0, fanout_a),
wallet_key_mem (0, fanout_a),
kdf (kdf_a)
{
init_a = false;
initialize (transaction_a, init_a, wallet_a);
if (!init_a)
{
MDB_val junk;
assert (mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) == MDB_NOTFOUND);
boost::property_tree::ptree wallet_l;
std::stringstream istream (json_a);
try
{
boost::property_tree::read_json (istream, wallet_l);
}
catch (...)
{
init_a = true;
}
for (auto i (wallet_l.begin ()), n (wallet_l.end ()); i != n; ++i)
{
nano::uint256_union key;
init_a = key.decode_hex (i->first);
if (!init_a)
{
nano::uint256_union value;
init_a = value.decode_hex (wallet_l.get<std::string> (i->first));
if (!init_a)
{
entry_put_raw (transaction_a, key, nano::wallet_value (value, 0));
}
else
{
init_a = true;
}
}
else
{
init_a = true;
}
}
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (wallet_key_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (salt_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (check_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (representative_special), &junk) != 0;
nano::raw_key key;
key.data.clear ();
password.value_set (key);
key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key;
wallet_key_mem.value_set (key);
}
}
nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a) :
password (0, fanout_a),
wallet_key_mem (0, fanout_a),
kdf (kdf_a)
{
init_a = false;
initialize (transaction_a, init_a, wallet_a);
if (!init_a)
{
int version_status;
MDB_val version_value;
version_status = mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &version_value);
if (version_status == MDB_NOTFOUND)
{
version_put (transaction_a, version_current);
nano::uint256_union salt_l;
random_pool::generate_block (salt_l.bytes.data (), salt_l.bytes.size ());
entry_put_raw (transaction_a, nano::wallet_store::salt_special, nano::wallet_value (salt_l, 0));
// Wallet key is a fixed random key that encrypts all entries
nano::raw_key wallet_key;
random_pool::generate_block (wallet_key.data.bytes.data (), sizeof (wallet_key.data.bytes));
nano::raw_key password_l;
password_l.data.clear ();
password.value_set (password_l);
nano::raw_key zero;
zero.data.clear ();
// Wallet key is encrypted by the user's password
nano::uint256_union encrypted;
encrypted.encrypt (wallet_key, zero, salt_l.owords[0]);
entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0));
nano::raw_key wallet_key_enc;
wallet_key_enc.data = encrypted;
wallet_key_mem.value_set (wallet_key_enc);
nano::uint256_union check;
check.encrypt (zero, wallet_key, salt_l.owords[check_iv_index]);
entry_put_raw (transaction_a, nano::wallet_store::check_special, nano::wallet_value (check, 0));
entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0));
nano::raw_key seed;
random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ());
seed_set (transaction_a, seed);
entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, nano::wallet_value (nano::uint256_union (0), 0));
}
}
nano::raw_key key;
key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key;
wallet_key_mem.value_set (key);
}
std::vector<nano::account> nano::wallet_store::accounts (nano::transaction const & transaction_a)
{
std::vector<nano::account> result;
for (auto i (begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::account account (i->first);
result.push_back (account);
}
return result;
}
void nano::wallet_store::initialize (nano::transaction const & transaction_a, bool & init_a, std::string const & path_a)
{
assert (strlen (path_a.c_str ()) == path_a.size ());
auto error (0);
error |= mdb_dbi_open (tx (transaction_a), path_a.c_str (), MDB_CREATE, &handle);
init_a = error != 0;
}
bool nano::wallet_store::is_representative (nano::transaction const & transaction_a)
{
return exists (transaction_a, representative (transaction_a));
}
void nano::wallet_store::representative_set (nano::transaction const & transaction_a, nano::account const & representative_a)
{
entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0));
}
nano::account nano::wallet_store::representative (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::representative_special));
return value.key;
}
nano::public_key nano::wallet_store::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & prv)
{
assert (valid_password (transaction_a));
nano::public_key pub (nano::pub_key (prv.data));
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
nano::uint256_union ciphertext;
ciphertext.encrypt (prv, password_l, pub.owords[0].number ());
entry_put_raw (transaction_a, pub, nano::wallet_value (ciphertext, 0));
return pub;
}
void nano::wallet_store::insert_watch (nano::transaction const & transaction_a, nano::public_key const & pub)
{
entry_put_raw (transaction_a, pub, nano::wallet_value (nano::uint256_union (0), 0));
}
void nano::wallet_store::erase (nano::transaction const & transaction_a, nano::public_key const & pub)
{
auto status (mdb_del (tx (transaction_a), handle, nano::mdb_val (pub), nullptr));
assert (status == 0);
}
nano::wallet_value nano::wallet_store::entry_get_raw (nano::transaction const & transaction_a, nano::public_key const & pub_a)
{
nano::wallet_value result;
nano::mdb_val value;
auto status (mdb_get (tx (transaction_a), handle, nano::mdb_val (pub_a), value));
if (status == 0)
{
result = nano::wallet_value (value);
}
else
{
result.key.clear ();
result.work = 0;
}
return result;
}
void nano::wallet_store::entry_put_raw (nano::transaction const & transaction_a, nano::public_key const & pub_a, nano::wallet_value const & entry_a)
{
auto status (mdb_put (tx (transaction_a), handle, nano::mdb_val (pub_a), entry_a.val (), 0));
assert (status == 0);
}
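// Values that don't fit in 64 bits are ad-hoc key ciphertexts; (1 << 32) | index marks a deterministic entry; anything else is unknown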
nano::key_type nano::wallet_store::key_type (nano::wallet_value const & value_a)
{
auto number (value_a.key.number ());
nano::key_type result;
if (number > std::numeric_limits<uint64_t>::max ())
{
result = nano::key_type::adhoc;
}
else
{
if ((number >> 32).convert_to<uint32_t> () == 1)
{
result = nano::key_type::deterministic;
}
else
{
result = nano::key_type::unknown;
}
}
return result;
}
bool nano::wallet_store::fetch (nano::transaction const & transaction_a, nano::public_key const & pub, nano::raw_key & prv)
{
auto result (false);
if (valid_password (transaction_a))
{
nano::wallet_value value (entry_get_raw (transaction_a, pub));
if (!value.key.is_zero ())
{
switch (key_type (value))
{
case nano::key_type::deterministic:
{
nano::raw_key seed_l;
seed (seed_l, transaction_a);
uint32_t index (static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1)));
deterministic_key (prv, transaction_a, index);
break;
}
case nano::key_type::adhoc:
{
// Ad-hoc keys
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
prv.decrypt (value.key, password_l, pub.owords[0].number ());
break;
}
default:
{
result = true;
break;
}
}
}
else
{
result = true;
}
}
else
{
result = true;
}
if (!result)
{
nano::public_key compare (nano::pub_key (prv.data));
if (!(pub == compare))
{
result = true;
}
}
return result;
}
bool nano::wallet_store::exists (nano::transaction const & transaction_a, nano::public_key const & pub)
{
return !pub.is_zero () && find (transaction_a, pub) != end ();
}
void nano::wallet_store::serialize_json (nano::transaction const & transaction_a, std::string & string_a)
{
boost::property_tree::ptree tree;
for (nano::store_iterator<nano::uint256_union, nano::wallet_value> i (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle)), n (nullptr); i != n; ++i)
{
tree.put (i->first.to_string (), i->second.key.to_string ());
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree);
string_a = ostream.str ();
}
void nano::wallet_store::write_backup (nano::transaction const & transaction_a, boost::filesystem::path const & path_a)
{
std::ofstream backup_file;
backup_file.open (path_a.string ());
if (!backup_file.fail ())
{
// Set permissions to 600
boost::system::error_code ec;
nano::set_secure_perm_file (path_a, ec);
std::string json;
serialize_json (transaction_a, json);
backup_file << json;
}
}
bool nano::wallet_store::move (nano::transaction const & transaction_a, nano::wallet_store & other_a, std::vector<nano::public_key> const & keys)
{
assert (valid_password (transaction_a));
assert (other_a.valid_password (transaction_a));
auto result (false);
for (auto i (keys.begin ()), n (keys.end ()); i != n; ++i)
{
nano::raw_key prv;
auto error (other_a.fetch (transaction_a, *i, prv));
result = result | error;
if (!result)
{
insert_adhoc (transaction_a, prv);
other_a.erase (transaction_a, *i);
}
}
return result;
}
bool nano::wallet_store::import (nano::transaction const & transaction_a, nano::wallet_store & other_a)
{
assert (valid_password (transaction_a));
assert (other_a.valid_password (transaction_a));
auto result (false);
for (auto i (other_a.begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::raw_key prv;
auto error (other_a.fetch (transaction_a, nano::uint256_union (i->first), prv));
result = result | error;
if (!result)
{
if (!prv.data.is_zero ())
{
insert_adhoc (transaction_a, prv);
}
else
{
insert_watch (transaction_a, nano::uint256_union (i->first));
}
other_a.erase (transaction_a, nano::uint256_union (i->first));
}
}
return result;
}
bool nano::wallet_store::work_get (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t & work_a)
{
auto result (false);
auto entry (entry_get_raw (transaction_a, pub_a));
if (!entry.key.is_zero ())
{
work_a = entry.work;
}
else
{
result = true;
}
return result;
}
void nano::wallet_store::work_put (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t work_a)
{
auto entry (entry_get_raw (transaction_a, pub_a));
assert (!entry.key.is_zero ());
entry.work = work_a;
entry_put_raw (transaction_a, pub_a, entry);
}
unsigned nano::wallet_store::version (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::version_special));
auto entry (value.key);
auto result (static_cast<unsigned> (entry.bytes[31]));
return result;
}
void nano::wallet_store::version_put (nano::transaction const & transaction_a, unsigned version_a)
{
nano::uint256_union entry (version_a);
entry_put_raw (transaction_a, nano::wallet_store::version_special, nano::wallet_value (entry, 0));
}
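// v1 wallets may contain keys encrypted under an all-zero key or one derived from the empty password; try both and re-insert any such keys under the current wallet key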
void nano::wallet_store::upgrade_v1_v2 (nano::transaction const & transaction_a)
{
assert (version (transaction_a) == 1);
nano::raw_key zero_password;
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special));
nano::raw_key kdf;
kdf.data.clear ();
zero_password.decrypt (value.key, kdf, salt (transaction_a).owords[0]);
derive_key (kdf, transaction_a, "");
nano::raw_key empty_password;
empty_password.decrypt (value.key, kdf, salt (transaction_a).owords[0]);
for (auto i (begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::public_key key (i->first);
nano::raw_key prv;
if (fetch (transaction_a, key, prv))
{
// Key failed to decrypt despite valid password
nano::wallet_value data (entry_get_raw (transaction_a, key));
prv.decrypt (data.key, zero_password, salt (transaction_a).owords[0]);
nano::public_key compare (nano::pub_key (prv.data));
if (compare == key)
{
// If we successfully decrypted it, rewrite the key back with the correct wallet key
insert_adhoc (transaction_a, prv);
}
else
{
// Also try the empty password
nano::wallet_value data (entry_get_raw (transaction_a, key));
prv.decrypt (data.key, empty_password, salt (transaction_a).owords[0]);
nano::public_key compare (nano::pub_key (prv.data));
if (compare == key)
{
// If we successfully decrypted it, rewrite the key back with the correct wallet key
insert_adhoc (transaction_a, prv);
}
}
}
}
version_put (transaction_a, 2);
}
void nano::wallet_store::upgrade_v2_v3 (nano::transaction const & transaction_a)
{
assert (version (transaction_a) == 2);
nano::raw_key seed;
random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ());
seed_set (transaction_a, seed);
entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, nano::wallet_value (nano::uint256_union (0), 0));
version_put (transaction_a, 3);
}
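// v4 re-encrypts the seed under its own IV and every ad-hoc key under an IV derived from its public key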
void nano::wallet_store::upgrade_v3_v4 (nano::transaction const & transaction_a)
{
assert (version (transaction_a) == 3);
version_put (transaction_a, 4);
assert (valid_password (transaction_a));
nano::raw_key seed;
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::seed_special));
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
seed.decrypt (value.key, password_l, salt (transaction_a).owords[0]);
nano::uint256_union ciphertext;
ciphertext.encrypt (seed, password_l, salt (transaction_a).owords[seed_iv_index]);
entry_put_raw (transaction_a, nano::wallet_store::seed_special, nano::wallet_value (ciphertext, 0));
for (auto i (begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::wallet_value value (i->second);
if (!value.key.is_zero ())
{
switch (key_type (i->second))
{
case nano::key_type::adhoc:
{
nano::raw_key key;
if (fetch (transaction_a, nano::public_key (i->first), key))
{
// Key failed to decrypt despite valid password
key.decrypt (value.key, password_l, salt (transaction_a).owords[0]);
nano::uint256_union new_key_ciphertext;
new_key_ciphertext.encrypt (key, password_l, (nano::uint256_union (i->first)).owords[0].number ());
nano::wallet_value new_value (new_key_ciphertext, value.work);
erase (transaction_a, nano::public_key (i->first));
entry_put_raw (transaction_a, nano::public_key (i->first), new_value);
}
}
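// Intentional fallthrough: deterministic entries need no re-encryption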
case nano::key_type::deterministic:
break;
default:
assert (false);
}
}
}
}
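// Derive a key from the password with Argon2d; the work factor comes from the network parameters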
void nano::kdf::phs (nano::raw_key & result_a, std::string const & password_a, nano::uint256_union const & salt_a)
{
static nano::network_params network_params;
std::lock_guard<std::mutex> lock (mutex);
auto success (argon2_hash (1, network_params.kdf_work, 1, password_a.data (), password_a.size (), salt_a.bytes.data (), salt_a.bytes.size (), result_a.data.bytes.data (), result_a.data.bytes.size (), NULL, 0, Argon2_d, 0x10));
assert (success == 0);
(void)success;
}
nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a) :
lock_observer ([](bool, bool) {}),
store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a),
wallets (wallets_a)
{
}
nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a, std::string const & json) :
lock_observer ([](bool, bool) {}),
store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a, json),
wallets (wallets_a)
{
}
void nano::wallet::enter_initial_password ()
{
nano::raw_key password_l;
{
std::lock_guard<std::recursive_mutex> lock (store.mutex);
store.password.value (password_l);
}
if (password_l.data.is_zero ())
{
auto transaction (wallets.tx_begin_write ());
if (store.valid_password (transaction))
{
// Newly created wallets have a zero key
store.rekey (transaction, "");
}
else
{
enter_password (transaction, "");
}
}
}
bool nano::wallet::enter_password (nano::transaction const & transaction_a, std::string const & password_a)
{
auto result (store.attempt_password (transaction_a, password_a));
if (!result)
{
auto this_l (shared_from_this ());
wallets.node.background ([this_l]() {
this_l->search_pending ();
});
wallets.node.logger.try_log ("Wallet unlocked");
}
else
{
wallets.node.logger.try_log ("Invalid password, wallet locked");
}
lock_observer (result, password_a.empty ());
return result;
}
nano::public_key nano::wallet::deterministic_insert (nano::transaction const & transaction_a, bool generate_work_a)
{
nano::public_key key (0);
if (store.valid_password (transaction_a))
{
key = store.deterministic_insert (transaction_a);
if (generate_work_a)
{
work_ensure (key, key);
}
auto block_transaction (wallets.node.store.tx_begin_read ());
if (wallets.node.ledger.weight (block_transaction, key) >= wallets.node.config.vote_minimum.number ())
{
std::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
++wallets.reps_count;
}
}
return key;
}
nano::public_key nano::wallet::deterministic_insert (uint32_t const index, bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
nano::public_key key (0);
if (store.valid_password (transaction))
{
key = store.deterministic_insert (transaction, index);
if (generate_work_a)
{
work_ensure (key, key);
}
}
return key;
}
nano::public_key nano::wallet::deterministic_insert (bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
auto result (deterministic_insert (transaction, generate_work_a));
return result;
}
nano::public_key nano::wallet::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & key_a, bool generate_work_a)
{
nano::public_key key (0);
if (store.valid_password (transaction_a))
{
key = store.insert_adhoc (transaction_a, key_a);
auto block_transaction (wallets.node.store.tx_begin_read ());
if (generate_work_a)
{
work_ensure (key, wallets.node.ledger.latest_root (block_transaction, key));
}
if (wallets.node.ledger.weight (block_transaction, key) >= wallets.node.config.vote_minimum.number ())
{
std::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
++wallets.reps_count;
}
}
return key;
}
nano::public_key nano::wallet::insert_adhoc (nano::raw_key const & account_a, bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
auto result (insert_adhoc (transaction, account_a, generate_work_a));
return result;
}
void nano::wallet::insert_watch (nano::transaction const & transaction_a, nano::public_key const & pub_a)
{
store.insert_watch (transaction_a, pub_a);
}
bool nano::wallet::exists (nano::public_key const & account_a)
{
auto transaction (wallets.tx_begin_read ());
return store.exists (transaction, account_a);
}
bool nano::wallet::import (std::string const & json_a, std::string const & password_a)
{
auto error (false);
std::unique_ptr<nano::wallet_store> temp;
{
auto transaction (wallets.tx_begin_write ());
nano::uint256_union id;
random_pool::generate_block (id.bytes.data (), id.bytes.size ());
temp.reset (new nano::wallet_store (error, wallets.node.wallets.kdf, transaction, 0, 1, id.to_string (), json_a));
}
if (!error)
{
auto transaction (wallets.tx_begin_write ());
error = temp->attempt_password (transaction, password_a);
}
auto transaction (wallets.tx_begin_write ());
if (!error)
{
error = store.import (transaction, *temp);
}
temp->destroy (transaction);
return error;
}
void nano::wallet::serialize (std::string & json_a)
{
auto transaction (wallets.tx_begin_read ());
store.serialize_json (transaction, json_a);
}
void nano::wallet_store::destroy (nano::transaction const & transaction_a)
{
auto status (mdb_drop (tx (transaction_a), handle, 1));
assert (status == 0);
handle = 0;
}
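// Receive a pending send: build a state block (open or receive) for the destination account, provided the amount meets the receive minimum and the key can be decrypted, then publish it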
std::shared_ptr<nano::block> nano::wallet::receive_action (nano::block const & send_a, nano::account const & representative_a, nano::uint128_union const & amount_a, uint64_t work_a, bool generate_work_a)
{
nano::account account;
auto hash (send_a.hash ());
std::shared_ptr<nano::block> block;
if (wallets.node.config.receive_minimum.number () <= amount_a.number ())
{
auto block_transaction (wallets.node.ledger.store.tx_begin_read ());
auto transaction (wallets.tx_begin_read ());
nano::pending_info pending_info;
if (wallets.node.store.block_exists (block_transaction, hash))
{
account = wallets.node.ledger.block_destination (block_transaction, send_a);
if (!wallets.node.ledger.store.pending_get (block_transaction, nano::pending_key (account, hash), pending_info))
{
nano::raw_key prv;
if (!store.fetch (transaction, account, prv))
{
if (work_a == 0)
{
store.work_get (transaction, account, work_a);
}
nano::account_info info;
auto new_account (wallets.node.ledger.store.account_get (block_transaction, account, info));
if (!new_account)
{
std::shared_ptr<nano::block> rep_block = wallets.node.ledger.store.block_get (block_transaction, info.rep_block);
assert (rep_block != nullptr);
block.reset (new nano::state_block (account, info.head, rep_block->representative (), info.balance.number () + pending_info.amount.number (), hash, prv, account, work_a));
}
else
{
block.reset (new nano::state_block (account, 0, representative_a, pending_info.amount, hash, prv, account, work_a));
}
}
else
{
wallets.node.logger.try_log ("Unable to receive, wallet locked");
}
}
else
{
// Ledger doesn't have this marked as available to receive anymore
}
}
else
{
// Ledger doesn't have this block anymore.
}
}
else
{
wallets.node.logger.try_log (boost::str (boost::format ("Not receiving block %1% due to minimum receive threshold") % hash.to_string ()));
// Someone sent us something below the receive threshold
}
if (block != nullptr)
{
if (nano::work_validate (*block))
{
wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % account.to_account ()));
wallets.node.work_generate_blocking (*block, wallets.node.active.active_difficulty ());
}
wallets.watcher.add (block);
wallets.node.process_active (block);
wallets.node.block_processor.flush ();
if (generate_work_a)
{
work_ensure (account, block->hash ());
}
}
return block;
}
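// Build and publish a state block that changes only the account's representative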
std::shared_ptr<nano::block> nano::wallet::change_action (nano::account const & source_a, nano::account const & representative_a, uint64_t work_a, bool generate_work_a)
{
std::shared_ptr<nano::block> block;
{
auto transaction (wallets.tx_begin_read ());
auto block_transaction (wallets.node.store.tx_begin_read ());
if (store.valid_password (transaction))
{
auto existing (store.find (transaction, source_a));
if (existing != store.end () && !wallets.node.ledger.latest (block_transaction, source_a).is_zero ())
{
nano::account_info info;
auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info));
assert (!error1);
nano::raw_key prv;
auto error2 (store.fetch (transaction, source_a, prv));
assert (!error2);
if (work_a == 0)
{
store.work_get (transaction, source_a, work_a);
}
block.reset (new nano::state_block (source_a, info.head, representative_a, info.balance, 0, prv, source_a, work_a));
}
}
}
if (block != nullptr)
{
if (nano::work_validate (*block))
{
wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % source_a.to_account ()));
wallets.node.work_generate_blocking (*block, wallets.node.active.active_difficulty ());
}
wallets.watcher.add (block);
wallets.node.process_active (block);
wallets.node.block_processor.flush ();
if (generate_work_a)
{
work_ensure (source_a, block->hash ());
}
}
return block;
}
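// Build and publish a send state block; when an id is supplied, the resulting hash is cached in send_action_ids so repeated calls with the same id return the cached block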
std::shared_ptr<nano::block> nano::wallet::send_action (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a)
{
boost::optional<nano::mdb_val> id_mdb_val;
if (id_a)
{
id_mdb_val = nano::mdb_val (id_a->size (), const_cast<char *> (id_a->data ()));
}
// clang-format off
auto prepare_send = [&id_mdb_val, &wallets = this->wallets, &store = this->store, &source_a, &amount_a, &work_a, &account_a] (const auto & transaction) {
auto block_transaction (wallets.node.store.tx_begin_read ());
auto error (false);
auto cached_block (false);
std::shared_ptr<nano::block> block;
if (id_mdb_val)
{
nano::mdb_val result;
auto status (mdb_get (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, result));
if (status == 0)
{
nano::uint256_union hash (result);
block = wallets.node.store.block_get (block_transaction, hash);
if (block != nullptr)
{
cached_block = true;
wallets.node.network.flood_block (block);
}
}
else if (status != MDB_NOTFOUND)
{
error = true;
}
}
if (!error && block == nullptr)
{
if (store.valid_password (transaction))
{
auto existing (store.find (transaction, source_a));
if (existing != store.end ())
{
auto balance (wallets.node.ledger.account_balance (block_transaction, source_a));
if (!balance.is_zero () && balance >= amount_a)
{
nano::account_info info;
auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info));
assert (!error1);
nano::raw_key prv;
auto error2 (store.fetch (transaction, source_a, prv));
assert (!error2);
std::shared_ptr<nano::block> rep_block = wallets.node.ledger.store.block_get (block_transaction, info.rep_block);
assert (rep_block != nullptr);
if (work_a == 0)
{
store.work_get (transaction, source_a, work_a);
}
block.reset (new nano::state_block (source_a, info.head, rep_block->representative (), balance - amount_a, account_a, prv, source_a, work_a));
if (id_mdb_val && block != nullptr)
{
auto status (mdb_put (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, nano::mdb_val (block->hash ()), 0));
if (status != 0)
{
block = nullptr;
error = true;
}
}
}
}
}
}
return std::make_tuple (block, error, cached_block);
};
// clang-format on
std::tuple<std::shared_ptr<nano::block>, bool, bool> result;
{
if (id_mdb_val)
{
result = prepare_send (wallets.tx_begin_write ());
}
else
{
result = prepare_send (wallets.tx_begin_read ());
}
}
std::shared_ptr<nano::block> block;
bool error;
bool cached_block;
std::tie (block, error, cached_block) = result;
if (!error && block != nullptr && !cached_block)
{
if (nano::work_validate (*block))
{
wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % account_a.to_account ()));
wallets.node.work_generate_blocking (*block, wallets.node.active.active_difficulty ());
}
wallets.watcher.add (block);
wallets.node.process_active (block);
wallets.node.block_processor.flush ();
if (generate_work_a)
{
work_ensure (source_a, block->hash ());
}
}
return block;
}
bool nano::wallet::change_sync (nano::account const & source_a, nano::account const & representative_a)
{
std::promise<bool> result;
std::future<bool> future = result.get_future ();
// clang-format off
change_async (source_a, representative_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a == nullptr);
},
true);
// clang-format on
return future.get ();
}
void nano::wallet::change_async (nano::account const & source_a, nano::account const & representative_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, this_l, [this_l, source_a, representative_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) {
auto block (wallet_a.change_action (source_a, representative_a, work_a, generate_work_a));
action_a (block);
});
}
bool nano::wallet::receive_sync (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a)
{
std::promise<bool> result;
std::future<bool> future = result.get_future ();
// clang-format off
receive_async (block_a, representative_a, amount_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a == nullptr);
},
true);
// clang-format on
return future.get ();
}
void nano::wallet::receive_async (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (amount_a, this_l, [this_l, block_a, representative_a, amount_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) {
auto block (wallet_a.receive_action (*block_a, representative_a, amount_a, work_a, generate_work_a));
action_a (block);
});
}
nano::block_hash nano::wallet::send_sync (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a)
{
std::promise<nano::block_hash> result;
std::future<nano::block_hash> future = result.get_future ();
// clang-format off
send_async (source_a, account_a, amount_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a->hash ());
},
true);
// clang-format on
return future.get ();
}
void nano::wallet::send_async (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, this_l, [this_l, source_a, account_a, amount_a, action_a, work_a, generate_work_a, id_a](nano::wallet & wallet_a) {
auto block (wallet_a.send_action (source_a, account_a, amount_a, work_a, generate_work_a, id_a));
action_a (block);
});
}
// Update work for account if latest root is root_a
void nano::wallet::work_update (nano::transaction const & transaction_a, nano::account const & account_a, nano::block_hash const & root_a, uint64_t work_a)
{
assert (!nano::work_validate (root_a, work_a));
assert (store.exists (transaction_a, account_a));
auto block_transaction (wallets.node.store.tx_begin_read ());
auto latest (wallets.node.ledger.latest_root (block_transaction, account_a));
if (latest == root_a)
{
store.work_put (transaction_a, account_a, work_a);
}
else
{
wallets.node.logger.try_log ("Cached work no longer valid, discarding");
}
}
void nano::wallet::work_ensure (nano::account const & account_a, nano::block_hash const & hash_a)
{
wallets.node.wallets.queue_wallet_action (nano::wallets::generate_priority, shared_from_this (), [account_a, hash_a](nano::wallet & wallet_a) {
wallet_a.work_cache_blocking (account_a, hash_a);
});
}
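// Scan the ledger for pending sends to wallet accounts above the receive minimum; receive confirmed blocks and request confirmation for the rest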
bool nano::wallet::search_pending ()
{
auto transaction (wallets.tx_begin_read ());
auto result (!store.valid_password (transaction));
if (!result)
{
wallets.node.logger.try_log ("Beginning pending block search");
for (auto i (store.begin (transaction)), n (store.end ()); i != n; ++i)
{
auto block_transaction (wallets.node.store.tx_begin_read ());
nano::account account (i->first);
// Don't search pending for watch-only accounts
if (!nano::wallet_value (i->second).key.is_zero ())
{
for (auto j (wallets.node.store.pending_begin (block_transaction, nano::pending_key (account, 0))); nano::pending_key (j->first).account == account; ++j)
{
nano::pending_key key (j->first);
auto hash (key.hash);
nano::pending_info pending (j->second);
auto amount (pending.amount.number ());
if (wallets.node.config.receive_minimum.number () <= amount)
{
wallets.node.logger.try_log (boost::str (boost::format ("Found a pending block %1% for account %2%") % hash.to_string () % pending.source.to_account ()));
auto block (wallets.node.store.block_get (block_transaction, hash));
if (wallets.node.block_confirmed_or_being_confirmed (block_transaction, hash))
{
// Receive confirmed block
auto node_l (wallets.node.shared ());
wallets.node.background ([node_l, block, hash]() {
auto transaction (node_l->store.tx_begin_read ());
node_l->receive_confirmed (transaction, block, hash);
});
}
else
{
// Request confirmation for unconfirmed block
wallets.node.block_confirm (block);
}
}
}
}
}
wallets.node.logger.try_log ("Pending block search phase complete");
}
else
{
wallets.node.logger.try_log ("Stopping search, wallet is locked");
}
return result;
}
void nano::wallet::init_free_accounts (nano::transaction const & transaction_a)
{
free_accounts.clear ();
for (auto i (store.begin (transaction_a)), n (store.end ()); i != n; ++i)
{
free_accounts.insert (nano::uint256_union (i->first));
}
}
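// Probe indices past 'index' for accounts with existing blocks or pending funds, extending the search window on every hit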
uint32_t nano::wallet::deterministic_check (nano::transaction const & transaction_a, uint32_t index)
{
auto block_transaction (wallets.node.store.tx_begin_read ());
for (uint32_t i (index + 1), n (index + 64); i < n; ++i)
{
nano::raw_key prv;
store.deterministic_key (prv, transaction_a, i);
nano::keypair pair (prv.data.to_string ());
// Check if account received at least 1 block
auto latest (wallets.node.ledger.latest (block_transaction, pair.pub));
if (!latest.is_zero ())
{
index = i;
// i + 64 - Check additional 64 accounts
// i/64 - Check additional accounts for large wallets. I.e. 64000/64 = 1000 accounts to check
n = i + 64 + (i / 64);
}
else
{
// Check if there are pending blocks for account
for (auto ii (wallets.node.store.pending_begin (block_transaction, nano::pending_key (pair.pub, 0))); nano::pending_key (ii->first).account == pair.pub; ++ii)
{
index = i;
n = i + 64 + (i / 64);
break;
}
}
}
return index;
}
nano::public_key nano::wallet::change_seed (nano::transaction const & transaction_a, nano::raw_key const & prv_a, uint32_t count)
{
store.seed_set (transaction_a, prv_a);
auto account = deterministic_insert (transaction_a);
if (count == 0)
{
count = deterministic_check (transaction_a, 0);
}
for (uint32_t i (0); i < count; ++i)
{
// Disable work generation to prevent nodes with weak CPUs from getting stuck
account = deterministic_insert (transaction_a, false);
}
return account;
}
void nano::wallet::deterministic_restore (nano::transaction const & transaction_a)
{
auto index (store.deterministic_index_get (transaction_a));
auto new_index (deterministic_check (transaction_a, index));
for (uint32_t i (index); i <= new_index && index != new_index; ++i)
{
// Disable work generation to prevent nodes with weak CPUs from getting stuck
deterministic_insert (transaction_a, false);
}
}
bool nano::wallet::live ()
{
return store.handle != 0;
}
void nano::wallet::work_cache_blocking (nano::account const & account_a, nano::block_hash const & root_a)
{
auto begin (std::chrono::steady_clock::now ());
auto work (wallets.node.work_generate_blocking (root_a));
if (wallets.node.config.logging.work_generation_time ())
{
/*
* The difficulty parameter is the second parameter for `work_generate_blocking()`,
* currently we don't supply one so we must fetch the default value.
*/
auto difficulty (wallets.node.network_params.network.publish_threshold);
wallets.node.logger.try_log ("Work generation for ", root_a.to_string (), ", with a difficulty of ", difficulty, " complete: ", (std::chrono::duration_cast<std::chrono::microseconds> (std::chrono::steady_clock::now () - begin).count ()), " us");
}
auto transaction (wallets.tx_begin_write ());
if (live () && store.exists (transaction, account_a))
{
work_update (transaction, account_a, root_a, work);
}
}
nano::work_watcher::work_watcher (nano::node & node_a) :
node (node_a),
stopped (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::work_watcher);
run (); })
{
}
nano::work_watcher::~work_watcher ()
{
stop ();
}
void nano::work_watcher::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
blocks.clear ();
stopped = true;
condition.notify_all ();
lock.unlock ();
if (thread.joinable ())
{
thread.join ();
}
}
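// Every five seconds: drop watched blocks that have been confirmed and regenerate work for any whose difficulty has fallen below the active threshold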
void nano::work_watcher::run ()
{
std::unique_lock<std::mutex> lock (mutex);
std::chrono::steady_clock::time_point next_attempt;
while (!stopped)
{
condition.wait_until (lock, next_attempt, [this, &next_attempt]() {
return stopped || next_attempt < std::chrono::steady_clock::now ();
});
next_attempt = std::chrono::steady_clock::now () + std::chrono::seconds (5);
for (auto i (blocks.begin ()), n (blocks.end ()); i != n;)
{
std::unique_lock<std::mutex> lock (node.active.mutex);
auto confirmed (false);
auto existing (node.active.roots.find ((i->second)->qualified_root ()));
if (node.active.roots.end () != existing)
{
// The block may not be in the election container yet
confirmed = existing->election->confirmed.load ();
}
else
{
// so fall back to ledger confirmation
auto transaction (this->node.store.tx_begin_read ());
auto block = this->node.store.block_get (transaction, (i->second)->hash ());
if (block)
{
confirmed = this->node.block_confirmed_or_being_confirmed (transaction, (i->second)->hash ());
}
}
lock.unlock ();
if (confirmed)
{
i = blocks.erase (i);
}
else
{
++i;
}
}
for (auto & i : blocks)
{
uint64_t difficulty (0);
nano::work_validate (i.second->root (), i.second->block_work (), &difficulty);
if (node.active.active_difficulty () > difficulty)
{
lock.unlock ();
nano::state_block_builder builder;
std::error_code ec;
builder.from (*i.second);
builder.work (node.work_generate_blocking (i.second->root (), node.active.active_difficulty ()));
std::shared_ptr<state_block> block (builder.build (ec));
if (!ec)
{
{
std::lock_guard<std::mutex> active_lock (node.active.mutex);
auto existing (node.active.roots.find (i.second->qualified_root ()));
if (existing != node.active.roots.end ())
{
auto election (existing->election);
if (election->status.winner->hash () == i.second->hash ())
{
election->status.winner = block;
}
auto current (election->blocks.find (block->hash ()));
assert (current != election->blocks.end ());
current->second = block;
}
}
node.network.flood_block (block);
node.active.update_difficulty (*block.get ());
lock.lock ();
i.second = block;
lock.unlock ();
}
lock.lock ();
}
}
} // !stopped
}
void nano::work_watcher::add (std::shared_ptr<nano::block> block_a)
{
auto block_l (std::dynamic_pointer_cast<nano::state_block> (block_a));
if (!stopped && block_l != nullptr)
{
std::lock_guard<std::mutex> lock (mutex);
blocks[block_l->qualified_root ()] = block_l;
}
}
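// Worker loop: pull queued actions one at a time and run them with the action lock released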
void nano::wallets::do_wallet_actions ()
{
std::unique_lock<std::mutex> action_lock (action_mutex);
while (!stopped)
{
if (!actions.empty ())
{
auto first (actions.begin ());
auto wallet (first->second.first);
auto current (std::move (first->second.second));
actions.erase (first);
if (wallet->live ())
{
action_lock.unlock ();
observer (true);
current (*wallet);
observer (false);
action_lock.lock ();
}
}
else
{
condition.wait (action_lock);
}
}
}
nano::wallets::wallets (bool error_a, nano::node & node_a) :
observer ([](bool) {}),
node (node_a),
env (boost::polymorphic_downcast<nano::mdb_wallets_store *> (node_a.wallets_store_impl.get ())->environment),
stopped (false),
watcher (node_a),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::wallet_actions);
do_wallet_actions ();
})
{
std::unique_lock<std::mutex> lock (mutex);
if (!error_a)
{
auto transaction (tx_begin_write ());
auto status (mdb_dbi_open (env.tx (transaction), nullptr, MDB_CREATE, &handle));
split_if_needed (transaction, node.store);
status |= mdb_dbi_open (env.tx (transaction), "send_action_ids", MDB_CREATE, &send_action_ids);
assert (status == 0);
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ()))));
nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ()))));
for (; i != n; ++i)
{
nano::uint256_union id;
std::string text (i->first.data (), i->first.size ());
auto error (id.decode_hex (text));
assert (!error);
assert (items.find (id) == items.end ());
auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text));
if (!error)
{
items[id] = wallet;
}
else
{
// Couldn't open wallet
}
}
}
for (auto & item : items)
{
item.second->enter_initial_password ();
}
if (node_a.config.enable_voting)
{
lock.unlock ();
ongoing_compute_reps ();
}
}
nano::wallets::~wallets ()
{
stop ();
}
std::shared_ptr<nano::wallet> nano::wallets::open (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
std::shared_ptr<nano::wallet> result;
auto existing (items.find (id_a));
if (existing != items.end ())
{
result = existing->second;
}
return result;
}
std::shared_ptr<nano::wallet> nano::wallets::create (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
assert (items.find (id_a) == items.end ());
std::shared_ptr<nano::wallet> result;
auto error (false);
{
auto transaction (tx_begin_write ());
result = std::make_shared<nano::wallet> (error, transaction, *this, id_a.to_string ());
}
if (!error)
{
items[id_a] = result;
result->enter_initial_password ();
}
return result;
}
bool nano::wallets::search_pending (nano::uint256_union const & wallet_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto result (false);
auto existing (items.find (wallet_a));
result = existing == items.end ();
if (!result)
{
auto wallet (existing->second);
result = wallet->search_pending ();
}
return result;
}
void nano::wallets::search_pending_all ()
{
std::lock_guard<std::mutex> lock (mutex);
for (auto i : items)
{
i.second->search_pending ();
}
}
void nano::wallets::destroy (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
// Acquire action_mutex after the transaction to prevent deadlocks with deterministic_insert () and insert_adhoc ()
std::lock_guard<std::mutex> action_lock (action_mutex);
auto existing (items.find (id_a));
assert (existing != items.end ());
auto wallet (existing->second);
items.erase (existing);
wallet->store.destroy (transaction);
}
void nano::wallets::reload ()
{
std::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
std::unordered_set<nano::uint256_union> stored_items;
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ()))));
nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ()))));
for (; i != n; ++i)
{
nano::uint256_union id;
std::string text (i->first.data (), i->first.size ());
auto error (id.decode_hex (text));
assert (!error);
// New wallet
if (items.find (id) == items.end ())
{
auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text));
if (!error)
{
items[id] = wallet;
}
}
// List of wallets on disk
stored_items.insert (id);
}
// Remove wallets from memory that no longer exist on disk
std::vector<nano::uint256_union> deleted_items;
for (auto i : items)
{
if (stored_items.find (i.first) == stored_items.end ())
{
deleted_items.push_back (i.first);
}
}
for (auto & i : deleted_items)
{
assert (stored_items.find (i) == stored_items.end ());
items.erase (i);
}
}
void nano::wallets::queue_wallet_action (nano::uint128_t const & amount_a, std::shared_ptr<nano::wallet> wallet_a, std::function<void(nano::wallet &)> const & action_a)
{
{
std::lock_guard<std::mutex> action_lock (action_mutex);
actions.insert (std::make_pair (amount_a, std::make_pair (wallet_a, std::move (action_a))));
}
condition.notify_all ();
}
void nano::wallets::foreach_representative (nano::transaction const & transaction_a, std::function<void(nano::public_key const & pub_a, nano::raw_key const & prv_a)> const & action_a)
{
if (node.config.enable_voting)
{
std::lock_guard<std::mutex> lock (mutex);
auto transaction_l (tx_begin_read ());
for (auto i (items.begin ()), n (items.end ()); i != n; ++i)
{
auto & wallet (*i->second);
std::lock_guard<std::recursive_mutex> store_lock (wallet.store.mutex);
std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
for (auto ii (wallet.representatives.begin ()), nn (wallet.representatives.end ()); ii != nn; ++ii)
{
nano::account account (*ii);
if (wallet.store.exists (transaction_l, account))
{
if (!node.ledger.weight (transaction_a, account).is_zero ())
{
if (wallet.store.valid_password (transaction_l))
{
nano::raw_key prv;
auto error (wallet.store.fetch (transaction_l, account, prv));
assert (!error);
action_a (account, prv);
}
else
{
static auto last_log = std::chrono::steady_clock::time_point ();
if (last_log < std::chrono::steady_clock::now () - std::chrono::seconds (60))
{
last_log = std::chrono::steady_clock::now ();
node.logger.always_log (boost::str (boost::format ("Representative locked inside wallet %1%") % i->first.to_string ()));
}
}
}
}
}
}
}
}
bool nano::wallets::exists (nano::transaction const & transaction_a, nano::public_key const & account_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto result (false);
for (auto i (items.begin ()), n (items.end ()); !result && i != n; ++i)
{
result = i->second->store.exists (transaction_a, account_a);
}
return result;
}
void nano::wallets::stop ()
{
{
std::lock_guard<std::mutex> action_lock (action_mutex);
stopped = true;
actions.clear ();
}
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
}
nano::write_transaction nano::wallets::tx_begin_write ()
{
return env.tx_begin_write ();
}
nano::read_transaction nano::wallets::tx_begin_read ()
{
return env.tx_begin_read ();
}
void nano::wallets::clear_send_ids (nano::transaction const & transaction_a)
{
auto status (mdb_drop (env.tx (transaction_a), send_action_ids, 0));
assert (status == 0);
}
void nano::wallets::compute_reps ()
{
std::lock_guard<std::mutex> lock (mutex);
reps_count = 0;
auto ledger_transaction (node.store.tx_begin_read ());
auto transaction (tx_begin_read ());
for (auto i (items.begin ()), n (items.end ()); i != n; ++i)
{
auto & wallet (*i->second);
decltype (wallet.representatives) representatives_l;
for (auto ii (wallet.store.begin (transaction)), nn (wallet.store.end ()); ii != nn; ++ii)
{
auto account (ii->first);
if (node.ledger.weight (ledger_transaction, account) >= node.config.vote_minimum.number ())
{
representatives_l.insert (account);
++reps_count;
}
}
std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
wallet.representatives.swap (representatives_l);
}
}
void nano::wallets::ongoing_compute_reps ()
{
compute_reps ();
auto & node_l (node);
auto compute_delay (network_params.network.is_test_network () ? std::chrono::milliseconds (10) : std::chrono::milliseconds (15 * 60 * 1000)); // Representation drifts quickly on the test network but very slowly on the live network
node.alarm.add (std::chrono::steady_clock::now () + compute_delay, [&node_l]() {
node_l.wallets.ongoing_compute_reps ();
});
}
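// Wallet tables used to live inside the node's database; move any found there into the dedicated wallet environment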
void nano::wallets::split_if_needed (nano::transaction & transaction_destination, nano::block_store & store_a)
{
auto store_l (dynamic_cast<nano::mdb_store *> (&store_a));
if (store_l != nullptr)
{
if (items.empty ())
{
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
// clang-format off
auto get_store_it = [&handle = handle](nano::transaction const & transaction_source, std::string const & hash) {
return nano::store_iterator<std::array<char, 64>, nano::no_value> (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction_source, handle, nano::mdb_val (hash.size (), const_cast<char *> (hash.c_str ()))));
};
// clang-format on
// First do a read pass to check if there are any wallets that need extracting (to save holding a write lock and potentially being blocked)
auto wallets_need_splitting (false);
{
auto transaction_source (store_l->tx_begin_read ());
auto i = get_store_it (transaction_source, beginning);
auto n = get_store_it (transaction_source, end);
wallets_need_splitting = (i != n);
}
if (wallets_need_splitting)
{
auto transaction_source (store_l->tx_begin_write ());
auto i = get_store_it (transaction_source, beginning);
auto n = get_store_it (transaction_source, end);
auto tx_source = static_cast<MDB_txn *> (transaction_source.get_handle ());
auto tx_destination = static_cast<MDB_txn *> (transaction_destination.get_handle ());
for (; i != n; ++i)
{
nano::uint256_union id;
std::string text (i->first.data (), i->first.size ());
auto error1 (id.decode_hex (text));
assert (!error1);
assert (strlen (text.c_str ()) == text.size ());
move_table (text, tx_source, tx_destination);
}
}
}
}
}
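// Copy every record of the named table from the source transaction to the destination, then drop the source table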
void nano::wallets::move_table (std::string const & name_a, MDB_txn * tx_source, MDB_txn * tx_destination)
{
MDB_dbi handle_source;
auto error2 (mdb_dbi_open (tx_source, name_a.c_str (), MDB_CREATE, &handle_source));
assert (!error2);
MDB_dbi handle_destination;
auto error3 (mdb_dbi_open (tx_destination, name_a.c_str (), MDB_CREATE, &handle_destination));
assert (!error3);
MDB_cursor * cursor;
auto error4 (mdb_cursor_open (tx_source, handle_source, &cursor));
assert (!error4);
MDB_val val_key;
MDB_val val_value;
auto cursor_status (mdb_cursor_get (cursor, &val_key, &val_value, MDB_FIRST));
while (cursor_status == MDB_SUCCESS)
{
auto error5 (mdb_put (tx_destination, handle_destination, &val_key, &val_value, 0));
assert (!error5);
cursor_status = mdb_cursor_get (cursor, &val_key, &val_value, MDB_NEXT);
}
auto error6 (mdb_drop (tx_source, handle_source, 1));
assert (!error6);
}
nano::uint128_t const nano::wallets::generate_priority = std::numeric_limits<nano::uint128_t>::max ();
nano::uint128_t const nano::wallets::high_priority = std::numeric_limits<nano::uint128_t>::max () - 1;
nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a)
{
nano::store_iterator<nano::uint256_union, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (nano::uint256_union (special_count))));
return result;
}
nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a, nano::uint256_union const & key)
{
nano::store_iterator<nano::uint256_union, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (key)));
return result;
}
nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::find (nano::transaction const & transaction_a, nano::uint256_union const & key)
{
auto result (begin (transaction_a, key));
nano::store_iterator<nano::uint256_union, nano::wallet_value> end (nullptr);
if (result != end)
{
if (nano::uint256_union (result->first) == key)
{
return result;
}
else
{
return end;
}
}
else
{
return end;
}
}
nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::end ()
{
return nano::store_iterator<nano::uint256_union, nano::wallet_value> (nullptr);
}
nano::mdb_wallets_store::mdb_wallets_store (bool & error_a, boost::filesystem::path const & path_a, int lmdb_max_dbs) :
environment (error_a, path_a, lmdb_max_dbs, 1ULL * 1024 * 1024 * 1024)
{
}
MDB_txn * nano::wallet_store::tx (nano::transaction const & transaction_a) const
{
return static_cast<MDB_txn *> (transaction_a.get_handle ());
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (wallets & wallets, const std::string & name)
{
size_t items_count = 0;
size_t actions_count = 0;
{
std::lock_guard<std::mutex> guard (wallets.mutex);
items_count = wallets.items.size ();
actions_count = wallets.actions.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
auto sizeof_item_element = sizeof (decltype (wallets.items)::value_type);
auto sizeof_actions_element = sizeof (decltype (wallets.actions)::value_type);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "items", items_count, sizeof_item_element }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "actions_count", actions_count, sizeof_actions_element }));
return composite;
}
}
| 1 | 15,593 | could be `const &` ? | nanocurrency-nano-node | cpp |
@@ -75,6 +75,7 @@ module.exports = class Dashboard extends Plugin {
defaultTabIcon: defaultTabIcon,
showProgressDetails: false,
hideUploadButton: false,
+ hideProgressAfterFinish: false,
note: null,
closeModalOnClickOutside: false,
locale: defaultLocale, | 1 | const Plugin = require('../../core/Plugin')
const Translator = require('../../core/Translator')
const dragDrop = require('drag-drop')
const DashboardUI = require('./Dashboard')
const StatusBar = require('../StatusBar')
const Informer = require('../Informer')
const { findAllDOMElements, toArray } = require('../../core/Utils')
const prettyBytes = require('prettier-bytes')
const { defaultTabIcon } = require('./icons')
// some code for managing focus was adopted from https://github.com/ghosh/micromodal
// MIT licence, https://github.com/ghosh/micromodal/blob/master/LICENSE.md
// Copyright (c) 2017 Indrashish Ghosh
const FOCUSABLE_ELEMENTS = [
'a[href]',
'area[href]',
'input:not([disabled]):not([type="hidden"]):not([hidden])',
'select:not([disabled])',
'textarea:not([disabled])',
'button:not([disabled])',
'iframe',
'object',
'embed',
'[contenteditable]',
'[tabindex]:not([tabindex^="-"])'
]
/**
* Dashboard UI with previews, metadata editing, tabs for various services and more
*/
module.exports = class Dashboard extends Plugin {
constructor (uppy, opts) {
super(uppy, opts)
this.id = this.opts.id || 'Dashboard'
this.title = 'Dashboard'
this.type = 'orchestrator'
const defaultLocale = {
strings: {
selectToUpload: 'Select files to upload',
closeModal: 'Close Modal',
upload: 'Upload',
importFrom: 'Import files from',
dashboardWindowTitle: 'Uppy Dashboard Window (Press escape to close)',
dashboardTitle: 'Uppy Dashboard',
copyLinkToClipboardSuccess: 'Link copied to clipboard.',
copyLinkToClipboardFallback: 'Copy the URL below',
fileSource: 'File source',
done: 'Done',
localDisk: 'Local Disk',
myDevice: 'My Device',
dropPasteImport: 'Drop files here, paste, import from one of the locations above or',
dropPaste: 'Drop files here, paste or',
browse: 'browse',
fileProgress: 'File progress: upload speed and ETA',
numberOfSelectedFiles: 'Number of selected files',
uploadAllNewFiles: 'Upload all new files',
emptyFolderAdded: 'No files were added from empty folder',
folderAdded: {
0: 'Added %{smart_count} file from %{folder}',
1: 'Added %{smart_count} files from %{folder}'
}
}
}
// set default options
const defaultOptions = {
target: 'body',
metaFields: [],
trigger: '#uppy-select-files',
inline: false,
width: 750,
height: 550,
semiTransparent: false,
defaultTabIcon: defaultTabIcon,
showProgressDetails: false,
hideUploadButton: false,
note: null,
closeModalOnClickOutside: false,
locale: defaultLocale,
onRequestCloseModal: () => this.closeModal()
}
// merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
this.locale = Object.assign({}, defaultLocale, this.opts.locale)
this.locale.strings = Object.assign({}, defaultLocale.strings, this.opts.locale.strings)
this.translator = new Translator({locale: this.locale})
this.i18n = this.translator.translate.bind(this.translator)
this.closeModal = this.closeModal.bind(this)
this.requestCloseModal = this.requestCloseModal.bind(this)
this.openModal = this.openModal.bind(this)
this.isModalOpen = this.isModalOpen.bind(this)
this.addTarget = this.addTarget.bind(this)
this.hideAllPanels = this.hideAllPanels.bind(this)
this.showPanel = this.showPanel.bind(this)
this.getFocusableNodes = this.getFocusableNodes.bind(this)
this.setFocusToFirstNode = this.setFocusToFirstNode.bind(this)
this.maintainFocus = this.maintainFocus.bind(this)
this.initEvents = this.initEvents.bind(this)
this.onKeydown = this.onKeydown.bind(this)
this.handleClickOutside = this.handleClickOutside.bind(this)
this.handleFileCard = this.handleFileCard.bind(this)
this.handleDrop = this.handleDrop.bind(this)
this.handlePaste = this.handlePaste.bind(this)
this.handleInputChange = this.handleInputChange.bind(this)
this.updateDashboardElWidth = this.updateDashboardElWidth.bind(this)
this.render = this.render.bind(this)
this.install = this.install.bind(this)
}
addTarget (plugin) {
const callerPluginId = plugin.id || plugin.constructor.name
const callerPluginName = plugin.title || callerPluginId
const callerPluginType = plugin.type
if (callerPluginType !== 'acquirer' &&
callerPluginType !== 'progressindicator' &&
callerPluginType !== 'presenter') {
let msg = 'Dashboard: Modal can only be used by plugins of types: acquirer, progressindicator, presenter'
this.uppy.log(msg)
return
}
const target = {
id: callerPluginId,
name: callerPluginName,
type: callerPluginType
}
const state = this.getPluginState()
const newTargets = state.targets.slice()
newTargets.push(target)
this.setPluginState({
targets: newTargets
})
return this.el
}
hideAllPanels () {
this.setPluginState({
activePanel: false
})
}
showPanel (id) {
const { targets } = this.getPluginState()
const activePanel = targets.filter((target) => {
return target.type === 'acquirer' && target.id === id
})[0]
this.setPluginState({
activePanel: activePanel
})
}
requestCloseModal () {
if (this.opts.onRequestCloseModal) {
return this.opts.onRequestCloseModal()
} else {
this.closeModal()
}
}
getFocusableNodes () {
const nodes = this.el.querySelectorAll(FOCUSABLE_ELEMENTS)
return Object.keys(nodes).map((key) => nodes[key])
}
setFocusToFirstNode () {
const focusableNodes = this.getFocusableNodes()
if (focusableNodes.length) focusableNodes[0].focus()
}
maintainFocus (event) {
var focusableNodes = this.getFocusableNodes()
var focusedItemIndex = focusableNodes.indexOf(document.activeElement)
if (event.shiftKey && focusedItemIndex === 0) {
focusableNodes[focusableNodes.length - 1].focus()
event.preventDefault()
}
if (!event.shiftKey && focusedItemIndex === focusableNodes.length - 1) {
focusableNodes[0].focus()
event.preventDefault()
}
}
openModal () {
this.setPluginState({
isHidden: false
})
// save scroll position
this.savedDocumentScrollPosition = window.scrollY
// add class to body that sets position fixed, move everything back
// to scroll position
document.body.classList.add('uppy-Dashboard-isOpen')
document.body.style.top = `-${this.savedDocumentScrollPosition}px`
this.updateDashboardElWidth()
this.setFocusToFirstNode()
// timeout is needed because yo-yo/morphdom/nanoraf; not needed without nanoraf
// setTimeout(this.setFocusToFirstNode, 100)
// setTimeout(this.updateDashboardElWidth, 100)
}
closeModal () {
this.setPluginState({
isHidden: true
})
document.body.classList.remove('uppy-Dashboard-isOpen')
window.scrollTo(0, this.savedDocumentScrollPosition)
}
isModalOpen () {
return !this.getPluginState().isHidden || false
}
onKeydown (event) {
// close modal on esc key press
if (event.keyCode === 27) this.requestCloseModal(event)
// maintainFocus on tab key press
if (event.keyCode === 9) this.maintainFocus(event)
}
handleClickOutside () {
if (this.opts.closeModalOnClickOutside) this.requestCloseModal()
}
handlePaste (ev) {
const files = toArray(ev.clipboardData.items)
files.forEach((file) => {
if (file.kind !== 'file') return
const blob = file.getAsFile()
if (!blob) {
this.uppy.log('[Dashboard] File pasted, but the file blob is empty')
this.uppy.info('Error pasting file', 'error')
return
}
this.uppy.log('[Dashboard] File pasted')
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: blob
})
})
}
handleInputChange (ev) {
ev.preventDefault()
const files = toArray(ev.target.files)
files.forEach((file) => {
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: file
})
})
}
initEvents () {
// Modal open button
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.addEventListener('click', this.openModal))
}
if (!this.opts.inline && !showModalTrigger) {
this.uppy.log('Dashboard modal trigger not found, you won’t be able to select files. Make sure `trigger` is set correctly in Dashboard options', 'error')
}
if (!this.opts.inline) {
document.addEventListener('keydown', this.onKeydown)
}
// Drag Drop
this.removeDragDropListener = dragDrop(this.el, (files) => {
this.handleDrop(files)
})
this.uppy.on('dashboard:file-card', this.handleFileCard)
this.updateDashboardElWidth()
window.addEventListener('resize', this.updateDashboardElWidth)
}
removeEvents () {
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.removeEventListener('click', this.openModal))
}
if (!this.opts.inline) {
document.removeEventListener('keydown', this.onKeydown)
}
this.removeDragDropListener()
this.uppy.off('dashboard:file-card', this.handleFileCard)
window.removeEventListener('resize', this.updateDashboardElWidth)
}
updateDashboardElWidth () {
const dashboardEl = this.el.querySelector('.uppy-Dashboard-inner')
this.uppy.log(`Dashboard width: ${dashboardEl.offsetWidth}`)
this.setPluginState({
containerWidth: dashboardEl.offsetWidth
})
}
handleFileCard (fileId) {
this.setPluginState({
fileCardFor: fileId || false
})
}
handleDrop (files) {
this.uppy.log('[Dashboard] Files were dropped')
files.forEach((file) => {
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: file
})
})
}
render (state) {
const pluginState = this.getPluginState()
const files = state.files
const newFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadStarted
})
const inProgressFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadComplete &&
files[file].progress.uploadStarted &&
!files[file].isPaused
})
let inProgressFilesArray = []
inProgressFiles.forEach((file) => {
inProgressFilesArray.push(files[file])
})
let totalSize = 0
let totalUploadedSize = 0
inProgressFilesArray.forEach((file) => {
totalSize = totalSize + (file.progress.bytesTotal || 0)
totalUploadedSize = totalUploadedSize + (file.progress.bytesUploaded || 0)
})
totalSize = prettyBytes(totalSize)
totalUploadedSize = prettyBytes(totalUploadedSize)
const attachRenderFunctionToTarget = (target) => {
const plugin = this.uppy.getPlugin(target.id)
return Object.assign({}, target, {
icon: plugin.icon || this.opts.defaultTabIcon,
render: plugin.render
})
}
const isSupported = (target) => {
const plugin = this.uppy.getPlugin(target.id)
// If the plugin does not provide a `supported` check, assume the plugin works everywhere.
if (typeof plugin.isSupported !== 'function') {
return true
}
return plugin.isSupported()
}
const acquirers = pluginState.targets
.filter(target => target.type === 'acquirer' && isSupported(target))
.map(attachRenderFunctionToTarget)
const progressindicators = pluginState.targets
.filter(target => target.type === 'progressindicator')
.map(attachRenderFunctionToTarget)
const startUpload = (ev) => {
this.uppy.upload().catch((err) => {
// Log error.
this.uppy.log(err.stack || err.message || err)
})
}
const cancelUpload = (fileID) => {
this.uppy.emit('upload-cancel', fileID)
this.uppy.removeFile(fileID)
}
const showFileCard = (fileID) => {
this.uppy.emit('dashboard:file-card', fileID)
}
const fileCardDone = (meta, fileID) => {
this.uppy.setFileMeta(fileID, meta)
this.uppy.emit('dashboard:file-card')
}
return DashboardUI({
state: state,
modal: pluginState,
newFiles: newFiles,
files: files,
totalFileCount: Object.keys(files).length,
totalProgress: state.totalProgress,
acquirers: acquirers,
activePanel: pluginState.activePanel,
getPlugin: this.uppy.getPlugin,
progressindicators: progressindicators,
autoProceed: this.uppy.opts.autoProceed,
hideUploadButton: this.opts.hideUploadButton,
id: this.id,
closeModal: this.requestCloseModal,
handleClickOutside: this.handleClickOutside,
handleInputChange: this.handleInputChange,
handlePaste: this.handlePaste,
showProgressDetails: this.opts.showProgressDetails,
inline: this.opts.inline,
showPanel: this.showPanel,
hideAllPanels: this.hideAllPanels,
log: this.uppy.log,
i18n: this.i18n,
addFile: this.uppy.addFile,
removeFile: this.uppy.removeFile,
info: this.uppy.info,
note: this.opts.note,
metaFields: this.getPluginState().metaFields,
resumableUploads: this.uppy.state.capabilities.resumableUploads || false,
startUpload: startUpload,
pauseUpload: this.uppy.pauseResume,
retryUpload: this.uppy.retryUpload,
cancelUpload: cancelUpload,
fileCardFor: pluginState.fileCardFor,
showFileCard: showFileCard,
fileCardDone: fileCardDone,
updateDashboardElWidth: this.updateDashboardElWidth,
maxWidth: this.opts.maxWidth,
maxHeight: this.opts.maxHeight,
currentWidth: pluginState.containerWidth,
isWide: pluginState.containerWidth > 400
})
}
discoverProviderPlugins () {
this.uppy.iteratePlugins((plugin) => {
if (plugin && !plugin.target && plugin.opts && plugin.opts.target === this.constructor) {
this.addTarget(plugin)
}
})
}
install () {
// Set default state for Modal
this.setPluginState({
isHidden: true,
showFileCard: false,
activePanel: false,
metaFields: this.opts.metaFields,
targets: []
})
const target = this.opts.target
if (target) {
this.mount(target, this)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) plugin.mount(this, plugin)
})
if (!this.opts.disableStatusBar) {
this.uppy.use(StatusBar, {
target: this,
hideUploadButton: this.opts.hideUploadButton
})
}
if (!this.opts.disableInformer) {
this.uppy.use(Informer, {
target: this
})
}
this.discoverProviderPlugins()
this.initEvents()
}
uninstall () {
if (!this.opts.disableInformer) {
const informer = this.uppy.getPlugin('Informer')
if (informer) this.uppy.removePlugin(informer)
}
if (!this.opts.disableStatusBar) {
const statusBar = this.uppy.getPlugin('StatusBar')
// Checking if this plugin exists, in case it was removed by uppy-core
// before the Dashboard was.
if (statusBar) this.uppy.removePlugin(statusBar)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) plugin.unmount()
})
this.unmount()
this.removeEvents()
}
}
| 1 | 10,343 | The docs should also be updated with the new option. | transloadit-uppy | js |
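A minimal usage sketch for the new option, the kind of snippet the docs update the reviewer asks for would need to cover; the require paths and factory-style instantiation follow Uppy's 0.x layout and are assumptions, not part of the patch:

```js
const Uppy = require('uppy/lib/core')
const Dashboard = require('uppy/lib/plugins/Dashboard')

const uppy = Uppy({ autoProceed: false })
uppy.use(Dashboard, {
  trigger: '#uppy-select-files',
  hideProgressAfterFinish: true // new option added by this patch
})
```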
@@ -157,6 +157,15 @@ def send_email(subject, message, sender, recipients, image_png=None):
send_email_smtp(config, sender, subject, message, recipients, image_png)
+def send_notification(subject, message, topic):
+ import boto.sns
+ config = configuration.get_config()
+ con = boto.sns.connect_to_region(config.get('sns', 'region', 'us-east-1'),
+ aws_access_key_id=config.get('sns', 'AWS_ACCESS_KEY', None),
+ aws_secret_access_key=config.get('sns', 'AWS_SECRET_KEY', None))
+ con.publish(topic, message, subject[:100])
+
+
def send_error_email(subject, message):
"""
Sends an email to the configured error-email. | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Supports sending emails when tasks fail.
This needs some more documentation.
See :doc:`/configuration` for configuration options.
In particular using the config `error-email` should set up Luigi so that it will send emails when tasks fail.
::
[core]
error-email: foo@bar.baz
'''
import logging
import socket
import sys
from luigi import configuration
logger = logging.getLogger("luigi-interface")
DEFAULT_CLIENT_EMAIL = 'luigi-client@%s' % socket.gethostname()
DEBUG = False
def email_type():
return configuration.get_config().get('core', 'email-type', 'plain')
def generate_email(sender, subject, message, recipients, image_png):
import email
import email.mime
import email.mime.multipart
import email.mime.text
import email.mime.image
msg_root = email.mime.multipart.MIMEMultipart('related')
msg_text = email.mime.text.MIMEText(message, email_type())
msg_text.set_charset('utf-8')
msg_root.attach(msg_text)
if image_png:
fp = open(image_png, 'rb')
msg_image = email.mime.image.MIMEImage(fp.read(), 'png')
fp.close()
msg_root.attach(msg_image)
msg_root['Subject'] = subject
msg_root['From'] = sender
msg_root['To'] = ','.join(recipients)
return msg_root
def wrap_traceback(traceback):
if email_type() == 'html':
return '<pre>%s</pre>' % traceback
return traceback
def send_email_smtp(config, sender, subject, message, recipients, image_png):
import smtplib
smtp_ssl = config.getboolean('core', 'smtp_ssl', False)
smtp_host = config.get('core', 'smtp_host', 'localhost')
smtp_port = config.getint('core', 'smtp_port', 0)
smtp_local_hostname = config.get('core', 'smtp_local_hostname', None)
smtp_timeout = config.getfloat('core', 'smtp_timeout', None)
kwargs = dict(host=smtp_host, port=smtp_port, local_hostname=smtp_local_hostname)
if smtp_timeout:
kwargs['timeout'] = smtp_timeout
smtp_login = config.get('core', 'smtp_login', None)
smtp_password = config.get('core', 'smtp_password', None)
smtp = smtplib.SMTP(**kwargs) if not smtp_ssl else smtplib.SMTP_SSL(**kwargs)
if smtp_login and smtp_password:
smtp.login(smtp_login, smtp_password)
msg_root = generate_email(sender, subject, message, recipients, image_png)
smtp.sendmail(sender, recipients, msg_root.as_string())
def send_email_ses(config, sender, subject, message, recipients, image_png):
import boto.ses
con = boto.ses.connect_to_region(config.get('email', 'region', 'us-east-1'),
aws_access_key_id=config.get('email', 'AWS_ACCESS_KEY', None),
aws_secret_access_key=config.get('email', 'AWS_SECRET_KEY', None))
msg_root = generate_email(sender, subject, message, recipients, image_png)
con.send_raw_email(msg_root.as_string(),
source=msg_root['From'],
destinations=msg_root['To'])
def send_email_sendgrid(config, sender, subject, message, recipients, image_png):
import sendgrid
client = sendgrid.SendGridClient(config.get('email', 'SENDGRID_USERNAME', None),
config.get('email', 'SENDGRID_PASSWORD', None),
raise_errors=True)
to_send = sendgrid.Mail()
to_send.add_to(recipients)
to_send.set_from(sender)
to_send.set_subject(subject)
if email_type() == 'html':
to_send.set_html(message)
else:
to_send.set_text(message)
if image_png:
to_send.add_attachment(image_png)
client.send(to_send)
def send_email(subject, message, sender, recipients, image_png=None):
config = configuration.get_config()
subject = _prefix(subject)
if not recipients or recipients == (None,):
return
if (sys.stdout.isatty() or DEBUG) and (not config.getboolean('email', 'force-send', False)):
logger.info("Not sending email when running from a tty or in debug mode")
return
# Clean the recipients lists to allow multiple error-email addresses, comma
# separated in luigi.cfg
recipients_tmp = []
for r in recipients:
recipients_tmp.extend(r.split(','))
# Replace original recipients with the clean list
recipients = recipients_tmp
email_sender_type = config.get('email', 'type', None)
if email_sender_type == "ses":
send_email_ses(config, sender, subject, message, recipients, image_png)
elif email_sender_type == "sendgrid":
send_email_sendgrid(config, sender, subject, message, recipients, image_png)
else:
send_email_smtp(config, sender, subject, message, recipients, image_png)
def send_error_email(subject, message):
"""
Sends an email to the configured error-email.
If no error-email is configured, then a message is logged.
"""
config = configuration.get_config()
receiver = config.get('core', 'error-email', None)
if receiver:
sender = config.get('core', 'email-sender', DEFAULT_CLIENT_EMAIL)
logger.info("Sending warning email to %r", receiver)
send_email(
subject=subject,
message=message,
sender=sender,
recipients=(receiver,)
)
else:
logger.info("Skipping error email. Set `error-email` in the `core` "
"section of the luigi config file to receive error "
"emails.")
def _prefix(subject):
"""
If the config has a special prefix for emails then this function adds
this prefix.
"""
config = configuration.get_config()
email_prefix = config.get('core', 'email-prefix', None)
if email_prefix is not None:
subject = "%s %s" % (email_prefix, subject)
return subject
| 1 | 12,241 | Name is _way_ too generic for this patch to be accepted, I think. Can you rename it to maybe `send_sns_notification` or something else you find suitable? | spotify-luigi | py 
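A sketch of the rename the reviewer asks for, reusing the body from the patch above; it assumes the same module context (`configuration` is already imported at the top of the file):

```python
def send_sns_notification(subject, message, topic):
    """Publish a notification to an AWS SNS topic.

    Renamed from the overly generic ``send_notification``.
    """
    import boto.sns
    config = configuration.get_config()
    con = boto.sns.connect_to_region(
        config.get('sns', 'region', 'us-east-1'),
        aws_access_key_id=config.get('sns', 'AWS_ACCESS_KEY', None),
        aws_secret_access_key=config.get('sns', 'AWS_SECRET_KEY', None))
    # SNS caps subjects at 100 characters, hence the slice
    con.publish(topic, message, subject[:100])
```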
@@ -31,7 +31,7 @@ func run() error {
return fmt.Errorf("failed to load config file: %s", err)
}
- logger := logging.BuildLogger(cfg.Logging.Level)
+ logger := logging.BuildLogger(*cfg.Logging.Level)
ctx := log.WithLogger(context.Background(), logger)
key, err := cfg.Eth.LoadKey() | 1 | package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
log "github.com/noxiouz/zapctx/ctxlog"
"github.com/sonm-io/core/cmd"
"github.com/sonm-io/core/insonmnia/dwh"
"github.com/sonm-io/core/insonmnia/logging"
"github.com/sonm-io/core/util"
"go.uber.org/zap"
)
var (
configFlag string
versionFlag bool
appVersion string
)
func main() {
cmd.NewCmd("dwh", appVersion, &configFlag, &versionFlag, run).Execute()
}
func run() error {
cfg, err := dwh.NewConfig(configFlag)
if err != nil {
return fmt.Errorf("failed to load config file: %s", err)
}
logger := logging.BuildLogger(cfg.Logging.Level)
ctx := log.WithLogger(context.Background(), logger)
key, err := cfg.Eth.LoadKey()
if err != nil {
return fmt.Errorf("failed to load private key: %s", err)
}
w, err := dwh.NewDWH(ctx, cfg, key)
if err != nil {
return fmt.Errorf("failed to create new DWH service: %s", err)
}
go util.StartPrometheus(ctx, cfg.MetricsListenAddr)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
<-c
w.Stop()
}()
log.G(ctx).Info("starting DWH service", zap.String("grpc_addr", cfg.GRPCListenAddr), zap.String("http_addr", cfg.HTTPListenAddr))
if err := w.Serve(); err != nil {
return fmt.Errorf("failed to serve DWH: %s", err)
}
return nil
}
| 1 | 6,845 | Out of scope. | sonm-io-core | go 
@@ -151,4 +151,12 @@ if (typeof Object.assign != 'function') {
}
return to;
};
-}
+}
+
+
+NodeList.prototype.forEach = function(callback, thisArg) {
+ thisArg = thisArg || window;
+ for (var i = 0; i < this.length; i++) {
+ callback.call(thisArg, this[i], i, this);
+ }
+}; | 1 | if (!String.prototype.endsWith) {
String.prototype.endsWith = function(search, this_len) {
if (this_len === undefined || this_len > this.length) {
this_len = this.length;
}
return this.substring(this_len - search.length, this_len) === search;
};
}
if (!String.prototype.startsWith) {
Object.defineProperty(String.prototype, 'startsWith', {
value: function(search, pos) {
pos = !pos || pos < 0 ? 0 : +pos;
return this.substring(pos, pos + search.length) === search;
}
});
}
if (!String.prototype.includes) {
String.prototype.includes = function(search, start) {
'use strict';
if (typeof start !== 'number') {
start = 0;
}
if (start + search.length > this.length) {
return false;
} else {
return this.indexOf(search, start) !== -1;
}
};
}
// https://tc39.github.io/ecma262/#sec-array.prototype.find
if (!Array.prototype.find) {
Object.defineProperty(Array.prototype, 'find', {
value: function(predicate) {
// 1. Let O be ? ToObject(this value).
if (this == null) {
throw new TypeError('"this" is null or not defined');
}
var o = Object(this);
// 2. Let len be ? ToLength(? Get(O, "length")).
var len = o.length >>> 0;
// 3. If IsCallable(predicate) is false, throw a TypeError exception.
if (typeof predicate !== 'function') {
throw new TypeError('predicate must be a function');
}
// 4. If thisArg was supplied, let T be thisArg; else let T be undefined.
var thisArg = arguments[1];
// 5. Let k be 0.
var k = 0;
// 6. Repeat, while k < len
while (k < len) {
// a. Let Pk be ! ToString(k).
// b. Let kValue be ? Get(O, Pk).
// c. Let testResult be ToBoolean(? Call(predicate, T, « kValue, k, O »)).
// d. If testResult is true, return kValue.
var kValue = o[k];
if (predicate.call(thisArg, kValue, k, o)) {
return kValue;
}
// e. Increase k by 1.
k++;
}
// 7. Return undefined.
return undefined;
},
configurable: true,
writable: true
});
}
// From https://github.com/kevlatus/polyfill-array-includes/blob/master/array-includes.js
if (!Array.prototype.includes) {
Object.defineProperty(Array.prototype, 'includes', {
value: function (searchElement, fromIndex) {
// 1. Let O be ? ToObject(this value).
if (this == null) {
throw new TypeError('"this" is null or not defined');
}
var o = Object(this);
// 2. Let len be ? ToLength(? Get(O, "length")).
var len = o.length >>> 0;
// 3. If len is 0, return false.
if (len === 0) {
return false;
}
// 4. Let n be ? ToInteger(fromIndex).
// (If fromIndex is undefined, this step produces the value 0.)
var n = fromIndex | 0;
// 5. If n ≥ 0, then
// a. Let k be n.
// 6. Else n < 0,
// a. Let k be len + n.
// b. If k < 0, let k be 0.
var k = Math.max(n >= 0 ? n : len - Math.abs(n), 0);
function sameValueZero(x, y) {
return x === y || (typeof x === 'number' && typeof y === 'number' && isNaN(x) && isNaN(y));
}
// 7. Repeat, while k < len
while (k < len) {
// a. Let elementK be the result of ? Get(O, ! ToString(k)).
// b. If SameValueZero(searchElement, elementK) is true, return true.
// c. Increase k by 1.
if (sameValueZero(o[k], searchElement)) {
return true;
}
k++;
}
// 8. Return false
return false;
}
});
}
// From https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign#Polyfill
if (typeof Object.assign != 'function') {
Object.assign = function(target, varArgs) { // .length of function is 2
'use strict';
if (target == null) { // TypeError if undefined or null
throw new TypeError('Cannot convert undefined or null to object');
}
var to = Object(target);
for (var index = 1; index < arguments.length; index++) {
var nextSource = arguments[index];
if (nextSource != null) { // Skip over if undefined or null
for (var nextKey in nextSource) {
// Avoid bugs when hasOwnProperty is shadowed
if (Object.prototype.hasOwnProperty.call(nextSource, nextKey)) {
to[nextKey] = nextSource[nextKey];
}
}
}
}
return to;
};
} | 1 | 14,226 | I think we need an if block to check if this API is really missing. We wouldn't want to override the browser's implementation. | Countly-countly-server | js 
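The guard the reviewer asks for is the same feature-detection pattern the rest of this file already uses; this mirrors the well-known MDN polyfill:

```js
if (window.NodeList && !NodeList.prototype.forEach) {
    NodeList.prototype.forEach = function(callback, thisArg) {
        thisArg = thisArg || window;
        for (var i = 0; i < this.length; i++) {
            callback.call(thisArg, this[i], i, this);
        }
    };
}
```

With the `if` in place, browsers that ship a native `NodeList.prototype.forEach` keep their own implementation.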
@@ -89,6 +89,11 @@ function _getItem(name) {
* @param {Function} editorClass Editor class.
*/
function _register(name, editorClass) {
+ if (name && typeof name !== 'string') {
+ editorClass = name;
+ name = editorClass.EDITOR_TYPE;
+ }
+
const editorWrapper = new RegisteredEditor(editorClass);
if (typeof name === 'string') { | 1 | /**
* Utility to register editors and common namespace for keeping reference to all editor classes.
*/
import Hooks from '../pluginHooks';
import staticRegister from '../utils/staticRegister';
const registeredEditorClasses = new WeakMap();
const {
register,
getItem,
hasItem,
getNames,
getValues,
} = staticRegister('editors');
/**
* @param {BaseEditor} editorClass The editor constructor.
*/
export function RegisteredEditor(editorClass) {
const instances = {};
const Clazz = editorClass;
this.getConstructor = function() {
return editorClass;
};
this.getInstance = function(hotInstance) {
if (!(hotInstance.guid in instances)) {
instances[hotInstance.guid] = new Clazz(hotInstance);
}
return instances[hotInstance.guid];
};
Hooks.getSingleton().add('afterDestroy', function() {
instances[this.guid] = null;
});
}
/**
* Returns instance (singleton) of editor class.
*
* @param {string} name Name of an editor under which it has been stored.
* @param {object} hotInstance Instance of Handsontable.
* @returns {Function} Returns instance of editor.
*/
export function _getEditorInstance(name, hotInstance) {
let editor;
if (typeof name === 'function') {
if (!(registeredEditorClasses.get(name))) {
_register(null, name);
}
editor = registeredEditorClasses.get(name);
} else if (typeof name === 'string') {
editor = getItem(name);
} else {
throw Error('Only strings and functions can be passed as "editor" parameter');
}
if (!editor) {
throw Error(`No editor registered under name "${name}"`);
}
return editor.getInstance(hotInstance);
}
/**
* Retrieve editor class.
*
* @param {string} name Editor identification.
* @returns {Function} Returns editor class.
*/
function _getItem(name) {
if (!hasItem(name)) {
throw Error(`No registered editor found under "${name}" name`);
}
return getItem(name).getConstructor();
}
/**
* Register editor class under specified name.
*
* @param {string} name Editor identification.
* @param {Function} editorClass Editor class.
*/
function _register(name, editorClass) {
const editorWrapper = new RegisteredEditor(editorClass);
if (typeof name === 'string') {
register(name, editorWrapper);
}
registeredEditorClasses.set(editorClass, editorWrapper);
}
export {
_register as registerEditor,
_getItem as getEditor,
_getEditorInstance as getEditorInstance,
hasItem as hasEditor,
getNames as getRegisteredEditorNames,
getValues as getRegisteredEditors,
};
| 1 | 17,443 | I was wondering: things like EDITOR_TYPE, VALIDATOR_TYPE, RENDERER_TYPE are not obligatory. Therefore, if a custom editor is registered with this method, `name` will be set to `undefined`. Should we handle this scenario? | handsontable-handsontable | js 
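One way to handle the scenario the reviewer raises is to fail fast instead of registering under `undefined`; a sketch of the guarded `_register` (the error wording is an assumption):

```js
function _register(name, editorClass) {
  if (name && typeof name !== 'string') {
    editorClass = name;
    name = editorClass.EDITOR_TYPE;

    // EDITOR_TYPE is not obligatory, so reject classes that omit it
    // rather than silently registering them under `undefined`.
    if (typeof name !== 'string') {
      throw new Error('The editor class must define a static EDITOR_TYPE property to be registered this way.');
    }
  }

  const editorWrapper = new RegisteredEditor(editorClass);

  if (typeof name === 'string') {
    register(name, editorWrapper);
  }
  registeredEditorClasses.set(editorClass, editorWrapper);
}
```

Anonymous registration through `_getEditorInstance` still works, because that path calls `_register(null, editorClass)` and skips the guard.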
@@ -1,7 +1,6 @@
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-
package p2p
import ( | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p2p
import (
"context"
"fmt"
"io"
ma "github.com/multiformats/go-multiaddr"
)
type Service interface {
AddProtocol(ProtocolSpec) error
Connect(ctx context.Context, addr ma.Multiaddr) (overlay string, err error)
Disconnect(overlay string) error
}
type Streamer interface {
NewStream(ctx context.Context, address, protocol, stream, version string) (Stream, error)
}
type Stream interface {
io.ReadWriter
io.Closer
}
type ProtocolSpec struct {
Name string
StreamSpecs []StreamSpec
}
type StreamSpec struct {
Name string
Version string
Handler HandlerFunc
}
type HandlerFunc func(Peer, Stream) error
type HandlerMiddleware func(HandlerFunc) HandlerFunc
type IncompatibleStreamError struct {
err error
}
func NewIncompatibleStreamError(err error) *IncompatibleStreamError {
return &IncompatibleStreamError{err: err}
}
func (e *IncompatibleStreamError) Unwrap() error { return e.err }
func (e *IncompatibleStreamError) Error() string {
return fmt.Sprintf("incompatible stream: %v", e.err)
}
func NewSwarmStreamName(protocol, stream, version string) string {
return "/swarm/" + protocol + "/" + stream + "/" + version
}
| 1 | 8,697 | This line should not be removed, as it is not the description of the package, but a copyright header. | ethersphere-bee | go |
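For context on why that blank line matters in Go: a comment block that directly touches the `package` clause is treated as the package documentation by godoc, so license headers are conventionally separated from it by a blank line. A sketch (the package-doc sentence is hypothetical):

```go
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package p2p defines the peer-to-peer abstractions used by the node.
// Only this adjacent comment becomes the godoc package description.
package p2p
```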
@@ -37,6 +37,8 @@ type DiskBlockCacheMetadata struct {
TriggeredPrefetch bool `codec:"HasPrefetched"`
// whether the block's triggered prefetches are complete
FinishedPrefetch bool
+ // the last tag with which the block was marked
+ Tag string
}
// lruEntry is an entry for sorting LRU times | 1 | package libkbfs
import (
"time"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
)
// diskBlockCacheEntry packages an encoded block and serverHalf into one data
// structure, allowing us to encode it as one set of bytes.
type diskBlockCacheEntry struct {
Buf []byte
ServerHalf kbfscrypto.BlockCryptKeyServerHalf
}
// Wrap time.Time so that go-codec falls back to using
// time.Time.MarshalBinary instead of its new msgpack timestamp
// extension encoding.
type legacyEncodedTime struct {
time.Time
}
// DiskBlockCacheMetadata packages the metadata needed to make decisions on
// cache eviction.
type DiskBlockCacheMetadata struct {
// the TLF ID for the block
TlfID tlf.ID
// the last time the block was used
LRUTime legacyEncodedTime
// the size of the block
BlockSize uint32
// whether the block has triggered prefetches
// This used to be called "HasPrefetched" so to maintain compatibility with
// existing disk caches, we have to name it that in the codec tag.
TriggeredPrefetch bool `codec:"HasPrefetched"`
// whether the block's triggered prefetches are complete
FinishedPrefetch bool
}
// lruEntry is an entry for sorting LRU times
type lruEntry struct {
BlockID kbfsblock.ID
Time time.Time
}
type blockIDsByTime []lruEntry
func (b blockIDsByTime) Len() int { return len(b) }
func (b blockIDsByTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b blockIDsByTime) Less(i, j int) bool { return b[i].Time.Before(b[j].Time) }
func (b blockIDsByTime) ToBlockIDSlice(numBlocks int) []kbfsblock.ID {
ids := make([]kbfsblock.ID, 0, numBlocks)
for _, entry := range b {
if len(ids) == numBlocks {
return ids
}
ids = append(ids, entry.BlockID)
}
return ids
}
| 1 | 20,893 | Wouldn't it be more space efficient to make this a `byte` enum or bitfield? | keybase-kbfs | go |
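A sketch of the reviewer's suggestion: a one-byte enum instead of a free-form string. The type and constant names are hypothetical, and the struct is an excerpt, not the real definition:

```go
package libkbfs

// PrefetchTag is a compact, one-byte alternative to storing the last
// mark as a string in every cache entry.
type PrefetchTag byte

const (
	TagUnknown PrefetchTag = iota
	TagSync
	TagWorking
)

// Excerpt: the enum costs one byte per entry on the wire instead of a
// length-prefixed string.
type DiskBlockCacheMetadata struct {
	Tag PrefetchTag
}
```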
@@ -207,13 +207,13 @@ class PostgresTarget(luigi.Target):
connection.autocommit = True
cursor = connection.cursor()
if self.use_db_timestamps:
- sql = """ CREATE TABLE {marker_table} (
+ sql = """ CREATE TABLE IF NOT EXISTS {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP DEFAULT NOW())
- """.format(marker_table=self.marker_table)
+ """.format(marker_table=self.marker_table)
else:
- sql = """ CREATE TABLE {marker_table} (
+ sql = """ CREATE TABLE IF NOT EXISTS {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP); | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a subclass of :py:class:`~luigi.target.Target` that writes data to Postgres.
Also provides a helper task to copy data into a Postgres table.
"""
import datetime
import logging
import re
import tempfile
from luigi import six
import luigi
from luigi.contrib import rdbms
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. Will crash at runtime if postgres functionality is used.")
class MultiReplacer(object):
"""
Object for one-pass replace of multiple words
Substituted parts will not be matched against other replace patterns, as opposed to when using multipass replace.
The order of the items in the replace_pairs input will dictate replacement precedence.
Constructor arguments:
replace_pairs -- list of 2-tuples which hold strings to be replaced and replace string
Usage:
.. code-block:: python
>>> replace_pairs = [("a", "b"), ("b", "c")]
>>> MultiReplacer(replace_pairs)("abcd")
'bccd'
>>> replace_pairs = [("ab", "x"), ("a", "x")]
>>> MultiReplacer(replace_pairs)("ab")
'x'
>>> replace_pairs.reverse()
>>> MultiReplacer(replace_pairs)("ab")
'xb'
"""
# TODO: move to misc/util module
def __init__(self, replace_pairs):
"""
Initializes a MultiReplacer instance.
:param replace_pairs: list of 2-tuples which hold strings to be replaced and replace string.
:type replace_pairs: tuple
"""
replace_list = list(replace_pairs) # make a copy in case input is iterable
self._replace_dict = dict(replace_list)
pattern = '|'.join(re.escape(x) for x, y in replace_list)
self._search_re = re.compile(pattern)
def _replacer(self, match_object):
# this method is used as the replace function in the re.sub below
return self._replace_dict[match_object.group()]
def __call__(self, search_string):
# using function replacing for a per-result replace
return self._search_re.sub(self._replacer, search_string)
# these are the escape sequences recognized by postgres COPY
# according to http://www.postgresql.org/docs/8.1/static/sql-copy.html
default_escape = MultiReplacer([('\\', '\\\\'),
('\t', '\\t'),
('\n', '\\n'),
('\r', '\\r'),
('\v', '\\v'),
('\b', '\\b'),
('\f', '\\f')
])
class PostgresTarget(luigi.Target):
"""
Target for a resource in Postgres.
This will rarely have to be directly instantiated by the user.
"""
marker_table = luigi.configuration.get_config().get('postgres', 'marker-table', 'table_updates')
# Use DB side timestamps or client side timestamps in the marker_table
use_db_timestamps = True
def __init__(
self, host, database, user, password, table, update_id, port=5432
):
"""
Args:
host (str): Postgres server address. Possibly a host:port string.
database (str): Database name
user (str): Database user
password (str): Password for specified user
update_id (str): An identifier for this data set
port (int): Postgres server port.
"""
if ':' in host:
self.host, self.port = host.split(':')
else:
self.host = host
self.port = port
self.database = database
self.user = user
self.password = password
self.table = table
self.update_id = update_id
def touch(self, connection=None):
"""
Mark this update as complete.
Important: If the marker table doesn't exist, the connection transaction will be aborted
and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
# TODO: test this
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
if self.use_db_timestamps:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table)
VALUES (%s, %s)
""".format(marker_table=self.marker_table),
(self.update_id, self.table))
else:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table, inserted)
VALUES (%s, %s, %s);
""".format(marker_table=self.marker_table),
(self.update_id, self.table,
datetime.datetime.now()))
# make sure update is properly marked
assert self.exists(connection)
def exists(self, connection=None):
if connection is None:
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
try:
cursor.execute("""SELECT 1 FROM {marker_table}
WHERE update_id = %s
LIMIT 1""".format(marker_table=self.marker_table),
(self.update_id,)
)
row = cursor.fetchone()
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE:
row = None
else:
raise
return row is not None
def connect(self):
"""
Get a psycopg2 connection object to the database where the table is.
"""
connection = psycopg2.connect(
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password)
connection.set_client_encoding('utf-8')
return connection
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
if self.use_db_timestamps:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP DEFAULT NOW())
""".format(marker_table=self.marker_table)
else:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP);
""".format(marker_table=self.marker_table)
try:
cursor.execute(sql)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.DUPLICATE_TABLE:
pass
else:
raise
connection.close()
def open(self, mode):
raise NotImplementedError("Cannot open() PostgresTarget")
class CopyToTable(rdbms.CopyToTable):
"""
Template task for inserting a data set into Postgres
Usage:
Subclass and override the required `host`, `database`, `user`,
`password`, `table` and `columns` attributes.
To customize how to access data from an input task, override the `rows` method
with a generator that yields each row as a tuple with fields ordered according to `columns`.
"""
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip('\n').split('\t')
def map_column(self, value):
"""
Applied to each column of every row returned by `rows`.
Default behaviour is to escape special characters and identify any self.null_values.
"""
if value in self.null_values:
return r'\\N'
else:
return default_escape(six.text_type(value))
# everything below will rarely have to be overridden
def output(self):
"""
Returns a PostgresTarget representing the inserted dataset.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
def copy(self, cursor, file):
if isinstance(self.columns[0], six.string_types):
column_names = self.columns
elif len(self.columns[0]) == 2:
column_names = [c[0] for c in self.columns]
else:
raise Exception('columns must consist of column strings or (column string, type string) tuples (was %r ...)' % (self.columns[0],))
cursor.copy_from(file, self.table, null=r'\\N', sep=self.column_separator, columns=column_names)
def run(self):
"""
Inserts data generated by rows() into target table.
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
Normally you don't want to override this.
"""
if not (self.table and self.columns):
raise Exception("table and columns need to be specified")
connection = self.output().connect()
# transform all data generated by rows() using map_column and write data
# to a temporary file for import using postgres COPY
tmp_dir = luigi.configuration.get_config().get('postgres', 'local-tmp-dir', None)
tmp_file = tempfile.TemporaryFile(dir=tmp_dir)
n = 0
for row in self.rows():
n += 1
if n % 100000 == 0:
logger.info("Wrote %d lines", n)
rowstr = self.column_separator.join(self.map_column(val) for val in row)
rowstr += "\n"
tmp_file.write(rowstr.encode('utf-8'))
logger.info("Done writing, importing at %s", datetime.datetime.now())
tmp_file.seek(0)
# attempt to copy the data into postgres
# if it fails because the target table doesn't exist
# try to create it by running self.create_table
for attempt in range(2):
try:
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, tmp_file)
self.post_copy(connection)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE and attempt == 0:
# if first attempt fails with "relation not found", try creating table
logger.info("Creating table %s", self.table)
connection.reset()
self.create_table(connection)
else:
raise
else:
break
# mark as complete in same transaction
self.output().touch(connection)
# commit and clean up
connection.commit()
connection.close()
tmp_file.close()
class PostgresQuery(rdbms.Query):
"""
Template task for querying a Postgres compatible database
Usage:
Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes.
Override the `run` method if your use case requires some action with the query result.
Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once
To customize the query signature as recorded in the database marker table, override the `update_id` property.
"""
def run(self):
connection = self.output().connect()
cursor = connection.cursor()
sql = self.query
logger.info('Executing query from task: {name}'.format(name=self.__class__))
cursor.execute(sql)
# Update marker table
self.output().touch(connection)
# commit and close connection
connection.commit()
connection.close()
def output(self):
"""
Returns a PostgresTarget representing the executed query.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
| 1 | 16,798 | Flake8 doesn't like that `e` is assigned but never used here. I think we can just remove the try/except here for `cursor.execute(sql)`. | spotify-luigi | py 
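With `CREATE TABLE IF NOT EXISTS` in place, the `DUPLICATE_TABLE` branch can no longer fire, so the handler (and the now-unused `e`) can go entirely; a sketch of the resulting method:

```python
def create_marker_table(self):
    """Create marker table if it doesn't exist.

    No try/except is needed: IF NOT EXISTS makes a duplicate-table
    error impossible.
    """
    connection = self.connect()
    connection.autocommit = True
    cursor = connection.cursor()
    if self.use_db_timestamps:
        sql = """ CREATE TABLE IF NOT EXISTS {marker_table} (
                  update_id TEXT PRIMARY KEY,
                  target_table TEXT,
                  inserted TIMESTAMP DEFAULT NOW())
              """.format(marker_table=self.marker_table)
    else:
        sql = """ CREATE TABLE IF NOT EXISTS {marker_table} (
                  update_id TEXT PRIMARY KEY,
                  target_table TEXT,
                  inserted TIMESTAMP);
              """.format(marker_table=self.marker_table)
    cursor.execute(sql)
    connection.close()
```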
@@ -0,0 +1,13 @@
+/**
+ * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
+ */
+
+package net.sourceforge.pmd.lang.java.ast;
+
+/**
+ * Marker interface for type body declarations.
+ *
+ * @author Clément Fournier
+ */
+public interface ASTAnyTypeBodyDeclaration extends JavaNode {
+} | 1 | 1 | 12,483 | Maybe add short example list: .... type body declarations, such as AnnotationMembers, Methods, Fields | pmd-pmd | java |
|
@@ -47,6 +47,10 @@ public interface CapabilityType {
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
+ String TIMEOUTS = "timeouts";
+ String IMPLICIT_TIMEOUT = "implicit";
+ String PAGE_LOAD_TIMEOUT = "pageLoad";
+ String SCRIPT_TIMEOUT = "script";
String LOGGING_PREFS = "loggingPrefs";
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
@Deprecated String PLATFORM = "platform";
String PLATFORM_NAME = "platformName";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String BROWSER_VERSION = "browserVersion";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String ACCEPT_INSECURE_CERTS = "acceptInsecureCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String UNHANDLED_PROMPT_BEHAVIOUR = "unhandledPromptBehavior";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 19,072 | These are really meant to be the keys in the capabilities, not the keys of values within the capabilities | SeleniumHQ-selenium | rb |
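To make the reviewer's point concrete: only `timeouts` belongs at the top level of the capabilities; `implicit`, `pageLoad` and `script` are keys inside its value. A sketch using the standard `MutableCapabilities` API (the numeric values are placeholders):

```java
import java.util.HashMap;
import java.util.Map;
import org.openqa.selenium.MutableCapabilities;

public class TimeoutsCapabilityExample {
  public static void main(String[] args) {
    Map<String, Number> timeouts = new HashMap<>();
    timeouts.put("implicit", 0);      // keys *inside* the capability value,
    timeouts.put("pageLoad", 300000); // not capability names themselves
    timeouts.put("script", 30000);

    MutableCapabilities caps = new MutableCapabilities();
    caps.setCapability("timeouts", timeouts); // the actual capability key
    System.out.println(caps);
  }
}
```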
@@ -30,6 +30,15 @@ import org.apache.iceberg.io.LocationProvider;
*/
public interface Table {
+ /**
+ * Return the full name for this table.
+ *
+ * @return this table's name
+ */
+ default String name() {
+ return toString();
+ }
+
/**
* Refresh the current table metadata.
*/ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of {@link PartitionSpec partition specs} for this table.
*
* @return this table's partition specs map
*/
Map<Integer, PartitionSpec> specs();
/**
* Return the {@link SortOrder sort order} for this table.
*
* @return this table's sort order
*/
SortOrder sortOrder();
/**
* Return a map of sort order IDs to {@link SortOrder sort orders} for this table.
*
* @return this table's sort orders map
*/
Map<Integer, SortOrder> sortOrders();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data files.
*
* @return a new {@link RowDelta}
*/
RowDelta newRowDelta();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
* @deprecated Replaced by {@link #manageSnapshots()}
*/
@Deprecated
Rollback rollback();
/**
* Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and commit.
* @return a new {@link ManageSnapshots}
*/
ManageSnapshots manageSnapshots();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* Returns a {@link FileIO} to read and write table data and metadata files.
*/
FileIO io();
/**
* Returns an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data files.
*/
EncryptionManager encryption();
/**
* Returns a {@link LocationProvider} to provide locations for new data files.
*/
LocationProvider locationProvider();
}
| 1 | 25,822 | It is debatable whether we have to default this. I did this to avoid breaking custom implementations. | apache-iceberg | java |
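A toy illustration of the compatibility argument, separate from the real Iceberg API: making a new interface method `default` keeps implementations written before it existed source-compatible, at the cost of a possibly surprising fallback:

```java
public class DefaultMethodCompat {
  interface Table {
    // Added after implementations already existed; defaulting to
    // toString() avoids breaking them.
    default String name() {
      return toString();
    }
  }

  // Written before name() existed; compiles without any change.
  static class LegacyTable implements Table {
    @Override
    public String toString() {
      return "db.legacy_table";
    }
  }

  public static void main(String[] args) {
    System.out.println(new LegacyTable().name()); // prints db.legacy_table
  }
}
```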
@@ -54,10 +54,8 @@ class ProxyType:
value = str(value).upper()
for attr in dir(cls):
attr_value = getattr(cls, attr)
- if isinstance(attr_value, dict) and \
- 'string' in attr_value and \
- attr_value['string'] is not None and \
- attr_value['string'] == value:
+ # `attr_value['string'] is not None` probably not required as `attr_value['string'] == value`
+ if isinstance(attr_value, dict) and 'string' in attr_value and attr_value['string'] == value:
return attr_value
raise Exception(f"No proxy type is found for {value}")
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Proxy implementation.
"""
class ProxyTypeFactory:
"""
Factory for proxy types.
"""
@staticmethod
def make(ff_value, string):
return {'ff_value': ff_value, 'string': string}
class ProxyType:
"""
Set of possible types of proxy.
Each proxy type has 2 properties:
'ff_value' is value of Firefox profile preference,
'string' is id of proxy type.
"""
DIRECT = ProxyTypeFactory.make(0, 'DIRECT') # Direct connection, no proxy (default on Windows).
MANUAL = ProxyTypeFactory.make(1, 'MANUAL') # Manual proxy settings (e.g., for httpProxy).
PAC = ProxyTypeFactory.make(2, 'PAC') # Proxy autoconfiguration from URL.
RESERVED_1 = ProxyTypeFactory.make(3, 'RESERVED1') # Never used.
AUTODETECT = ProxyTypeFactory.make(4, 'AUTODETECT') # Proxy autodetection (presumably with WPAD).
SYSTEM = ProxyTypeFactory.make(5, 'SYSTEM') # Use system settings (default on Linux).
UNSPECIFIED = ProxyTypeFactory.make(6, 'UNSPECIFIED') # Not initialized (for internal use).
@classmethod
def load(cls, value):
if isinstance(value, dict) and 'string' in value:
value = value['string']
value = str(value).upper()
for attr in dir(cls):
attr_value = getattr(cls, attr)
if isinstance(attr_value, dict) and \
'string' in attr_value and \
attr_value['string'] is not None and \
attr_value['string'] == value:
return attr_value
raise Exception(f"No proxy type is found for {value}")
class Proxy(object):
"""
Proxy contains information about proxy type and necessary proxy settings.
"""
proxyType = ProxyType.UNSPECIFIED
autodetect = False
ftpProxy = ''
httpProxy = ''
noProxy = ''
proxyAutoconfigUrl = ''
sslProxy = ''
socksProxy = ''
socksUsername = ''
socksPassword = ''
socksVersion = None
def __init__(self, raw=None):
"""
Creates a new Proxy.
:Args:
- raw: raw proxy data. If None, default class values are used.
"""
if raw is not None:
if 'proxyType' in raw and raw['proxyType'] is not None:
self.proxy_type = ProxyType.load(raw['proxyType'])
if 'ftpProxy' in raw and raw['ftpProxy'] is not None:
self.ftp_proxy = raw['ftpProxy']
if 'httpProxy' in raw and raw['httpProxy'] is not None:
self.http_proxy = raw['httpProxy']
if 'noProxy' in raw and raw['noProxy'] is not None:
self.no_proxy = raw['noProxy']
if 'proxyAutoconfigUrl' in raw and raw['proxyAutoconfigUrl'] is not None:
self.proxy_autoconfig_url = raw['proxyAutoconfigUrl']
if 'sslProxy' in raw and raw['sslProxy'] is not None:
self.sslProxy = raw['sslProxy']
if 'autodetect' in raw and raw['autodetect'] is not None:
self.auto_detect = raw['autodetect']
if 'socksProxy' in raw and raw['socksProxy'] is not None:
self.socks_proxy = raw['socksProxy']
if 'socksUsername' in raw and raw['socksUsername'] is not None:
self.socks_username = raw['socksUsername']
if 'socksPassword' in raw and raw['socksPassword'] is not None:
self.socks_password = raw['socksPassword']
if 'socksVersion' in raw and raw['socksVersion'] is not None:
self.socks_version = raw['socksVersion']
@property
def proxy_type(self):
"""
Returns proxy type as `ProxyType`.
"""
return self.proxyType
@proxy_type.setter
def proxy_type(self, value):
"""
Sets proxy type.
:Args:
- value: The proxy type.
"""
self._verify_proxy_type_compatibility(value)
self.proxyType = value
@property
def auto_detect(self):
"""
Returns autodetect setting.
"""
return self.autodetect
@auto_detect.setter
def auto_detect(self, value):
"""
Sets autodetect setting.
:Args:
- value: The autodetect value.
"""
if isinstance(value, bool):
if self.autodetect is not value:
self._verify_proxy_type_compatibility(ProxyType.AUTODETECT)
self.proxyType = ProxyType.AUTODETECT
self.autodetect = value
else:
raise ValueError("Autodetect proxy value needs to be a boolean")
@property
def ftp_proxy(self):
"""
Returns ftp proxy setting.
"""
return self.ftpProxy
@ftp_proxy.setter
def ftp_proxy(self, value):
"""
Sets ftp proxy setting.
:Args:
- value: The ftp proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.ftpProxy = value
@property
def http_proxy(self):
"""
Returns http proxy setting.
"""
return self.httpProxy
@http_proxy.setter
def http_proxy(self, value):
"""
Sets http proxy setting.
:Args:
- value: The http proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.httpProxy = value
@property
def no_proxy(self):
"""
Returns noproxy setting.
"""
return self.noProxy
@no_proxy.setter
def no_proxy(self, value):
"""
Sets noproxy setting.
:Args:
- value: The noproxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.noProxy = value
@property
def proxy_autoconfig_url(self):
"""
Returns proxy autoconfig url setting.
"""
return self.proxyAutoconfigUrl
@proxy_autoconfig_url.setter
def proxy_autoconfig_url(self, value):
"""
Sets proxy autoconfig url setting.
:Args:
- value: The proxy autoconfig url value.
"""
self._verify_proxy_type_compatibility(ProxyType.PAC)
self.proxyType = ProxyType.PAC
self.proxyAutoconfigUrl = value
@property
def ssl_proxy(self):
"""
Returns https proxy setting.
"""
return self.sslProxy
@ssl_proxy.setter
def ssl_proxy(self, value):
"""
Sets https proxy setting.
:Args:
- value: The https proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.sslProxy = value
@property
def socks_proxy(self):
"""
Returns socks proxy setting.
"""
return self.socksProxy
@socks_proxy.setter
def socks_proxy(self, value):
"""
Sets socks proxy setting.
:Args:
- value: The socks proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksProxy = value
@property
def socks_username(self):
"""
Returns socks proxy username setting.
"""
return self.socksUsername
@socks_username.setter
def socks_username(self, value):
"""
Sets socks proxy username setting.
:Args:
- value: The socks proxy username value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksUsername = value
@property
def socks_password(self):
"""
Returns socks proxy password setting.
"""
return self.socksPassword
@socks_password.setter
def socks_password(self, value):
"""
Sets socks proxy password setting.
:Args:
- value: The socks proxy password value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksPassword = value
@property
def socks_version(self):
"""
Returns socks proxy version setting.
"""
return self.socksVersion
@socks_version.setter
def socks_version(self, value):
"""
Sets socks proxy version setting.
:Args:
- value: The socks proxy version value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksVersion = value
def _verify_proxy_type_compatibility(self, compatibleProxy):
if self.proxyType != ProxyType.UNSPECIFIED and self.proxyType != compatibleProxy:
raise Exception(f"Specified proxy type ({compatibleProxy}) not compatible with current setting ({self.proxyType})")
def add_to_capabilities(self, capabilities):
"""
Adds proxy information as capability in specified capabilities.
:Args:
- capabilities: The capabilities to which proxy will be added.
"""
proxy_caps = {}
proxy_caps['proxyType'] = self.proxyType['string']
if self.autodetect:
proxy_caps['autodetect'] = self.autodetect
if self.ftpProxy:
proxy_caps['ftpProxy'] = self.ftpProxy
if self.httpProxy:
proxy_caps['httpProxy'] = self.httpProxy
if self.proxyAutoconfigUrl:
proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl
if self.sslProxy:
proxy_caps['sslProxy'] = self.sslProxy
if self.noProxy:
proxy_caps['noProxy'] = self.noProxy
if self.socksProxy:
proxy_caps['socksProxy'] = self.socksProxy
if self.socksUsername:
proxy_caps['socksUsername'] = self.socksUsername
if self.socksPassword:
proxy_caps['socksPassword'] = self.socksPassword
if self.socksVersion:
proxy_caps['socksVersion'] = self.socksVersion
capabilities['proxy'] = proxy_caps
| 1 | 18,411 | # `attr_value['string'] is not None` probably not required as `attr_value['string'] == value` check is already being done | SeleniumHQ-selenium | js |
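The patch above already drops the extra check; a minimal runnable demonstration of why that is safe: `value` is a real string by the time it is compared, so `x == value` is already False when `x` is None, and the separate None test adds nothing. `candidates` below is a stand-in for the ProxyType class attributes.
def find(value, candidates):
    value = str(value).upper()
    for attr_value in candidates:
        if isinstance(attr_value, dict) and 'string' in attr_value and attr_value['string'] == value:
            return attr_value
    raise Exception(f"No proxy type is found for {value}")

print(find('manual', [{'string': None}, {'string': 'MANUAL'}]))  # -> {'string': 'MANUAL'}; the None entry is skipped by == alone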
@@ -8,7 +8,7 @@ import (
"fmt"
"strings"
- "github.com/aws/amazon-ecs-cli-v2/internal/pkg/addons"
+ addon "github.com/aws/amazon-ecs-cli-v2/internal/pkg/addon"
awscloudformation "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecr"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/s3" | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"strings"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/addons"
awscloudformation "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecr"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/s3"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/tags"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/selector"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/config"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/describe"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/docker"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/command"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log"
termprogress "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/progress"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"
"github.com/spf13/cobra"
)
const (
inputImageTagPrompt = "Input an image tag value:"
)
var (
errNoLocalManifestsFound = errors.New("no manifest files found")
)
type deploySvcVars struct {
*GlobalOpts
Name string
EnvName string
ImageTag string
ResourceTags map[string]string
}
type deploySvcOpts struct {
deploySvcVars
store store
ws wsSvcReader
ecr ecrService
docker dockerService
s3 artifactUploader
cmd runner
addons templater
appCFN appResourcesGetter
svcCFN cloudformation.CloudFormation
sessProvider sessionProvider
spinner progress
sel wsSelector
// cached variables
targetApp *config.Application
targetEnvironment *config.Environment
targetSvc *config.Service
}
func newSvcDeployOpts(vars deploySvcVars) (*deploySvcOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("new config store: %w", err)
}
ws, err := workspace.New()
if err != nil {
return nil, fmt.Errorf("new workspace: %w", err)
}
return &deploySvcOpts{
deploySvcVars: vars,
store: store,
ws: ws,
spinner: termprogress.NewSpinner(),
sel: selector.NewWorkspaceSelect(vars.prompt, store, ws),
docker: docker.New(),
cmd: command.New(),
sessProvider: session.NewProvider(),
}, nil
}
// Validate returns an error if the user inputs are invalid.
func (o *deploySvcOpts) Validate() error {
if o.AppName() == "" {
return errNoAppInWorkspace
}
if o.Name != "" {
if err := o.validateSvcName(); err != nil {
return err
}
}
if o.EnvName != "" {
if err := o.validateEnvName(); err != nil {
return err
}
}
return nil
}
// Ask prompts the user for any required fields that are not provided.
func (o *deploySvcOpts) Ask() error {
if err := o.askSvcName(); err != nil {
return err
}
if err := o.askEnvName(); err != nil {
return err
}
if err := o.askImageTag(); err != nil {
return err
}
return nil
}
// Execute builds and pushes the container image for the service,
func (o *deploySvcOpts) Execute() error {
env, err := o.targetEnv()
if err != nil {
return err
}
o.targetEnvironment = env
app, err := o.store.GetApplication(o.AppName())
if err != nil {
return err
}
o.targetApp = app
svc, err := o.store.GetService(o.AppName(), o.Name)
if err != nil {
return fmt.Errorf("get service configuration: %w", err)
}
o.targetSvc = svc
if err := o.configureClients(); err != nil {
return err
}
if err := o.pushToECRRepo(); err != nil {
return err
}
// TODO: delete addons template from S3 bucket when deleting the environment.
addonsURL, err := o.pushAddonsTemplateToS3Bucket()
if err != nil {
return err
}
if err := o.deploySvc(addonsURL); err != nil {
return err
}
return o.showAppURI()
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (o *deploySvcOpts) RecommendedActions() []string {
return nil
}
func (o *deploySvcOpts) validateSvcName() error {
names, err := o.ws.ServiceNames()
if err != nil {
return fmt.Errorf("list services in the workspace: %w", err)
}
for _, name := range names {
if o.Name == name {
return nil
}
}
return fmt.Errorf("service %s not found in the workspace", color.HighlightUserInput(o.Name))
}
func (o *deploySvcOpts) validateEnvName() error {
if _, err := o.targetEnv(); err != nil {
return err
}
return nil
}
func (o *deploySvcOpts) targetEnv() (*config.Environment, error) {
env, err := o.store.GetEnvironment(o.AppName(), o.EnvName)
if err != nil {
return nil, fmt.Errorf("get environment %s configuration: %w", o.EnvName, err)
}
return env, nil
}
func (o *deploySvcOpts) askSvcName() error {
if o.Name != "" {
return nil
}
name, err := o.sel.Service("Select a service in your workspace", "")
if err != nil {
return fmt.Errorf("select service: %w", err)
}
o.Name = name
return nil
}
func (o *deploySvcOpts) askEnvName() error {
if o.EnvName != "" {
return nil
}
name, err := o.sel.Environment("Select an environment", "", o.AppName())
if err != nil {
return fmt.Errorf("select environment: %w", err)
}
o.EnvName = name
return nil
}
func (o *deploySvcOpts) askImageTag() error {
if o.ImageTag != "" {
return nil
}
tag, err := getVersionTag(o.cmd)
if err == nil {
o.ImageTag = tag
return nil
}
log.Warningln("Failed to default tag, are you in a git repository?")
userInputTag, err := o.prompt.Get(inputImageTagPrompt, "", nil /*no validation*/)
if err != nil {
return fmt.Errorf("prompt for image tag: %w", err)
}
o.ImageTag = userInputTag
return nil
}
func (o *deploySvcOpts) configureClients() error {
defaultSessEnvRegion, err := o.sessProvider.DefaultWithRegion(o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("create ECR session with region %s: %w", o.targetEnvironment.Region, err)
}
envSession, err := o.sessProvider.FromRole(o.targetEnvironment.ManagerRoleARN, o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("assuming environment manager role: %w", err)
}
// ECR client against tools account profile AND target environment region
o.ecr = ecr.New(defaultSessEnvRegion)
o.s3 = s3.New(defaultSessEnvRegion)
// CF client against env account profile AND target environment region
o.svcCFN = cloudformation.New(envSession)
addonsSvc, err := addons.New(o.Name)
if err != nil {
return fmt.Errorf("initiate addons service: %w", err)
}
o.addons = addonsSvc
// client to retrieve an application's resources created with CloudFormation
defaultSess, err := o.sessProvider.Default()
if err != nil {
return fmt.Errorf("create default session: %w", err)
}
o.appCFN = cloudformation.New(defaultSess)
return nil
}
func (o *deploySvcOpts) pushToECRRepo() error {
repoName := fmt.Sprintf("%s/%s", o.appName, o.Name)
uri, err := o.ecr.GetRepository(repoName)
if err != nil {
return fmt.Errorf("get ECR repository URI: %w", err)
}
path, err := o.getDockerfilePath()
if err != nil {
return err
}
if err := o.docker.Build(uri, o.ImageTag, path); err != nil {
return fmt.Errorf("build Dockerfile at %s with tag %s: %w", path, o.ImageTag, err)
}
auth, err := o.ecr.GetECRAuth()
if err != nil {
return fmt.Errorf("get ECR auth data: %w", err)
}
o.docker.Login(uri, auth.Username, auth.Password)
return o.docker.Push(uri, o.ImageTag)
}
func (o *deploySvcOpts) getDockerfilePath() (string, error) {
type dfPath interface {
DockerfilePath() string
}
manifestBytes, err := o.ws.ReadServiceManifest(o.Name)
if err != nil {
return "", fmt.Errorf("read manifest file %s: %w", o.Name, err)
}
svc, err := manifest.UnmarshalService(manifestBytes)
if err != nil {
return "", fmt.Errorf("unmarshal svc manifest: %w", err)
}
mf, ok := svc.(dfPath)
if !ok {
return "", fmt.Errorf("service %s does not have a dockerfile path", o.Name)
}
return mf.DockerfilePath(), nil
}
// pushAddonsTemplateToS3Bucket generates the addons template for the service and pushes it to S3.
// If the service doesn't have any addons, it returns the empty string and no errors.
// If the service has addons, it returns the URL of the S3 object storing the addons template.
func (o *deploySvcOpts) pushAddonsTemplateToS3Bucket() (string, error) {
template, err := o.addons.Template()
if err != nil {
var notExistErr *addons.ErrDirNotExist
if errors.As(err, ¬ExistErr) {
// addons doesn't exist for service, the url is empty.
return "", nil
}
return "", fmt.Errorf("retrieve addons template: %w", err)
}
resources, err := o.appCFN.GetAppResourcesByRegion(o.targetApp, o.targetEnvironment.Region)
if err != nil {
return "", fmt.Errorf("get app resources: %w", err)
}
reader := strings.NewReader(template)
url, err := o.s3.PutArtifact(resources.S3Bucket, fmt.Sprintf(config.AddonsCfnTemplateNameFormat, o.Name), reader)
if err != nil {
return "", fmt.Errorf("put addons artifact to bucket %s: %w", resources.S3Bucket, err)
}
return url, nil
}
func (o *deploySvcOpts) manifest() (interface{}, error) {
raw, err := o.ws.ReadServiceManifest(o.Name)
if err != nil {
return nil, fmt.Errorf("read service %s manifest from workspace: %w", o.Name, err)
}
mft, err := manifest.UnmarshalService(raw)
if err != nil {
return nil, fmt.Errorf("unmarshal service %s manifest: %w", o.Name, err)
}
return mft, nil
}
func (o *deploySvcOpts) runtimeConfig(addonsURL string) (*stack.RuntimeConfig, error) {
resources, err := o.appCFN.GetAppResourcesByRegion(o.targetApp, o.targetEnvironment.Region)
if err != nil {
return nil, fmt.Errorf("get application %s resources from region %s: %w", o.targetApp.Name, o.targetEnvironment.Region, err)
}
repoURL, ok := resources.RepositoryURLs[o.Name]
if !ok {
return nil, &errRepoNotFound{
svcName: o.Name,
envRegion: o.targetEnvironment.Region,
appAccountID: o.targetApp.AccountID,
}
}
return &stack.RuntimeConfig{
ImageRepoURL: repoURL,
ImageTag: o.ImageTag,
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.ResourceTags),
}, nil
}
func (o *deploySvcOpts) stackConfiguration(addonsURL string) (cloudformation.StackConfiguration, error) {
mft, err := o.manifest()
if err != nil {
return nil, err
}
rc, err := o.runtimeConfig(addonsURL)
if err != nil {
return nil, err
}
var conf cloudformation.StackConfiguration
switch t := mft.(type) {
case *manifest.LoadBalancedWebService:
if o.targetApp.RequiresDNSDelegation() {
conf, err = stack.NewHTTPSLoadBalancedWebService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
} else {
conf, err = stack.NewLoadBalancedWebService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
}
case *manifest.BackendService:
conf, err = stack.NewBackendService(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
default:
return nil, fmt.Errorf("unknown manifest type %T while creating the CloudFormation stack", t)
}
if err != nil {
return nil, fmt.Errorf("create stack configuration: %w", err)
}
return conf, nil
}
func (o *deploySvcOpts) deploySvc(addonsURL string) error {
conf, err := o.stackConfiguration(addonsURL)
if err != nil {
return err
}
o.spinner.Start(
fmt.Sprintf("Deploying %s to %s.",
fmt.Sprintf("%s:%s", color.HighlightUserInput(o.Name), color.HighlightUserInput(o.ImageTag)),
color.HighlightUserInput(o.targetEnvironment.Name)))
if err := o.svcCFN.DeployService(conf, awscloudformation.WithRoleARN(o.targetEnvironment.ExecutionRoleARN)); err != nil {
o.spinner.Stop(log.Serrorf("Failed to deploy service.\n"))
return fmt.Errorf("deploy service: %w", err)
}
o.spinner.Stop("\n")
return nil
}
func (o *deploySvcOpts) showAppURI() error {
type identifier interface {
URI(string) (string, error)
}
var svcDescriber identifier
var err error
switch o.targetSvc.Type {
case manifest.LoadBalancedWebServiceType:
svcDescriber, err = describe.NewWebServiceDescriber(o.AppName(), o.Name)
case manifest.BackendServiceType:
svcDescriber, err = describe.NewBackendServiceDescriber(o.AppName(), o.Name)
default:
err = errors.New("unexpected service type")
}
if err != nil {
return fmt.Errorf("create describer for service type %s: %w", o.targetSvc.Type, err)
}
uri, err := svcDescriber.URI(o.targetEnvironment.Name)
if err != nil {
return fmt.Errorf("get uri for environment %s: %w", o.targetEnvironment.Name, err)
}
switch o.targetSvc.Type {
case manifest.BackendServiceType:
log.Successf("Deployed %s, its service discovery endpoint is %s.\n", color.HighlightUserInput(o.Name), color.HighlightResource(uri))
default:
log.Successf("Deployed %s, you can access it at %s.\n", color.HighlightUserInput(o.Name), color.HighlightResource(uri))
}
return nil
}
// BuildSvcDeployCmd builds the `svc deploy` subcommand.
func BuildSvcDeployCmd() *cobra.Command {
vars := deploySvcVars{
GlobalOpts: NewGlobalOpts(),
}
cmd := &cobra.Command{
Use: "deploy",
Short: "Deploys a service to an environment.",
Long: `Deploys a service to an environment.`,
Example: `
Deploys a service named "frontend" to a "test" environment.
/code $ copilot svc deploy --name frontend --env test
Deploys a service with additional resource tags.
/code $ copilot svc deploy --resource-tags source/revision=bb133e7,deployment/initiator=manual`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newSvcDeployOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Execute(); err != nil {
return err
}
return nil
}),
}
cmd.Flags().StringVarP(&vars.Name, nameFlag, nameFlagShort, "", svcFlagDescription)
cmd.Flags().StringVarP(&vars.EnvName, envFlag, envFlagShort, "", envFlagDescription)
cmd.Flags().StringVar(&vars.ImageTag, imageTagFlag, "", imageTagFlagDescription)
cmd.Flags().StringToStringVar(&vars.ResourceTags, resourceTagsFlag, nil, resourceTagsFlagDescription)
return cmd
}
| 1 | 13,658 | we shouldn't need the "addon" rename | aws-copilot-cli | go |
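A hedged sketch of the review comment above: the package at .../internal/pkg/addon is already named addon, so the alias repeats the package name and can be dropped; the import is referenced the same way either way. The call below mirrors the shape of addons.New in the file above and is illustrative only.
package main

import (
	"fmt"

	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/addon" // no alias needed: the package is already named "addon"
)

func main() {
	svc, err := addon.New("frontend") // same call shape with or without the alias
	fmt.Println(svc, err)
}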
@@ -1,4 +1,6 @@
class Admin::DecksController < ApplicationController
+ before_filter :must_be_admin
+
def index
@decks = Deck.all
end
| 1 |
class Admin::DecksController < ApplicationController
def index
@decks = Deck.all
end
def new
@deck = Deck.new
end
def create
@deck = build_deck
if @deck.save
redirect_to admin_deck_path(@deck)
else
render :new
end
end
def show
@deck = find_deck
end
private
def find_deck
Deck.find(params[:id])
end
def build_deck
Deck.new(deck_params)
end
def deck_params
params.require(:deck).permit(:title)
end
end
| 1 | 15,570 | These are covered by routing constraints. | thoughtbot-upcase | rb |
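One common shape for the "routing constraints" the reviewer mentions, assuming the app authenticates with thoughtbot's Clearance gem (that assumption, and the admin? predicate, are not taken from this diff): the /admin namespace only matches for signed-in admins, so the controller needs no before_filter of its own.
# config/routes.rb (sketch)
constraints Clearance::Constraints::SignedIn.new { |user| user.admin? } do
  namespace :admin do
    resources :decks, only: [:index, :new, :create, :show]
  end
end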
@@ -32,7 +32,7 @@ import (
func (s *Service) InstanceIfExists(instanceID *string) (*v1alpha1.Instance, error) {
glog.V(2).Infof("Looking for instance %q", *instanceID)
input := &ec2.DescribeInstancesInput{
- InstanceIds: []*string{instanceID},
+ InstanceIds: aws.StringSlice([]string{*instanceID}),
}
out, err := s.EC2.DescribeInstances(input)
| 1 |
// Copyright © 2018 The Kubernetes Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ec2
import (
"encoding/base64"
"fmt"
"github.com/golang/glog"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1"
"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/services/certificates"
)
// InstanceIfExists returns the existing instance or nothing if it doesn't exist.
func (s *Service) InstanceIfExists(instanceID *string) (*v1alpha1.Instance, error) {
glog.V(2).Infof("Looking for instance %q", *instanceID)
input := &ec2.DescribeInstancesInput{
InstanceIds: []*string{instanceID},
}
out, err := s.EC2.DescribeInstances(input)
switch {
case IsNotFound(err):
return nil, nil
case err != nil:
return nil, errors.Errorf("failed to describe instances: %v", err)
}
if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 {
return fromSDKTypeToInstance(out.Reservations[0].Instances[0]), nil
}
return nil, nil
}
// CreateInstance runs an ec2 instance.
func (s *Service) CreateInstance(machine *clusterv1.Machine, config *v1alpha1.AWSMachineProviderConfig, clusterStatus *v1alpha1.AWSClusterProviderStatus) (*v1alpha1.Instance, error) {
input := &v1alpha1.Instance{
Type: config.InstanceType,
IAMProfile: config.IAMInstanceProfile,
}
// Pick image from the machine configuration, or use a default one.
if config.AMI.ID != nil {
input.ImageID = *config.AMI.ID
} else {
input.ImageID = s.defaultAMILookup(clusterStatus.Region)
}
// Pick subnet from the machine configuration, or default to the first private available.
if config.Subnet != nil && config.Subnet.ID != nil {
input.SubnetID = *config.Subnet.ID
} else {
sns := clusterStatus.Network.Subnets.FilterPrivate()
if len(sns) == 0 {
return nil, errors.New("failed to run instance, no subnets available")
}
input.SubnetID = sns[0].ID
}
// apply values based on the role of the machine
if machine.ObjectMeta.Labels["set"] == "controlplane" {
caCert, caKey, err := certificates.NewCertificateAuthority()
if err != nil {
return input, errors.Wrap(err, "Failed to generate a CA for the control plane")
}
clusterStatus.CACertificate = certificates.EncodeCertPEM(caCert)
clusterStatus.CAPrivateKey = certificates.EncodePrivateKeyPEM(caKey)
input.UserData = aws.String(initControlPlaneScript(clusterStatus.CACertificate, clusterStatus.CAPrivateKey))
input.SecurityGroupIDs = append(input.SecurityGroupIDs, clusterStatus.Network.SecurityGroups[v1alpha1.SecurityGroupControlPlane].ID)
}
if machine.ObjectMeta.Labels["set"] == "node" {
input.SecurityGroupIDs = append(input.SecurityGroupIDs, clusterStatus.Network.SecurityGroups[v1alpha1.SecurityGroupNode].ID)
}
// Pick SSH key, if any.
if config.KeyName != "" {
input.KeyName = aws.String(config.KeyName)
} else {
input.KeyName = aws.String(defaultSSHKeyName)
}
return s.runInstance(input)
}
// TerminateInstance terminates an EC2 instance.
// Returns nil on success, error in all other cases.
func (s *Service) TerminateInstance(instanceID *string) error {
input := &ec2.TerminateInstancesInput{
InstanceIds: []*string{
instanceID,
},
}
_, err := s.EC2.TerminateInstances(input)
if err != nil {
return err
}
return nil
}
// CreateOrGetMachine will either return an existing instance or create and return an instance.
func (s *Service) CreateOrGetMachine(machine *clusterv1.Machine, status *v1alpha1.AWSMachineProviderStatus, config *v1alpha1.AWSMachineProviderConfig, clusterStatus *v1alpha1.AWSClusterProviderStatus) (*v1alpha1.Instance, error) {
// instance id exists, try to get it
if status.InstanceID != nil {
glog.V(2).Infof("Looking up instance %q", *status.InstanceID)
instance, err := s.InstanceIfExists(status.InstanceID)
// if there was no error, return the found instance
if err == nil {
return instance, nil
}
// if there was an error but it's not IsNotFound then it's a real error
if !IsNotFound(err) {
return instance, errors.Wrapf(err, "instance %q was not found", *status.InstanceID)
}
return instance, errors.Wrapf(err, "failed to look up instance %q", *status.InstanceID)
}
// otherwise let's create it
return s.CreateInstance(machine, config, clusterStatus)
}
func (s *Service) runInstance(i *v1alpha1.Instance) (*v1alpha1.Instance, error) {
input := &ec2.RunInstancesInput{
InstanceType: aws.String(i.Type),
SubnetId: aws.String(i.SubnetID),
ImageId: aws.String(i.ImageID),
KeyName: i.KeyName,
EbsOptimized: i.EBSOptimized,
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
UserData: i.UserData,
}
if i.UserData != nil {
input.UserData = aws.String(base64.StdEncoding.EncodeToString([]byte(*i.UserData)))
}
if len(i.SecurityGroupIDs) > 0 {
input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs)
}
if i.IAMProfile != "" {
input.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{
Name: aws.String(i.IAMProfile),
}
}
if len(i.Tags) > 0 {
spec := &ec2.TagSpecification{ResourceType: aws.String(ec2.ResourceTypeInstance)}
for key, value := range i.Tags {
spec.Tags = append(spec.Tags, &ec2.Tag{
Key: aws.String(key),
Value: aws.String(value),
})
}
input.TagSpecifications = append(input.TagSpecifications, spec)
}
out, err := s.EC2.RunInstances(input)
if err != nil {
return nil, errors.Wrapf(err, "failed to run instance: %v", i)
}
if len(out.Instances) == 0 {
return nil, errors.Errorf("no instance returned for reservation %v", out.GoString())
}
return fromSDKTypeToInstance(out.Instances[0]), nil
}
// UpdateInstanceSecurityGroups modifies the security groups of the given
// EC2 instance.
func (s *Service) UpdateInstanceSecurityGroups(instanceID *string, securityGroups []*string) error {
input := &ec2.ModifyInstanceAttributeInput{
InstanceId: instanceID,
Groups: securityGroups,
}
_, err := s.EC2.ModifyInstanceAttribute(input)
if err != nil {
return err
}
return nil
}
// UpdateResourceTags updates the tags for an instance.
// This will be called if there is anything to create (update) or delete.
// We may not always have to perform each action, so we check what we're
// receiving to avoid calling AWS if we don't need to.
func (s *Service) UpdateResourceTags(resourceID *string, create map[string]string, remove map[string]string) error {
// If we have anything to create or update
if len(create) > 0 {
// Convert our create map into an array of *ec2.Tag
createTagsInput := mapToTags(create)
// Create the CreateTags input.
input := &ec2.CreateTagsInput{
Resources: []*string{resourceID},
Tags: createTagsInput,
}
// Create/Update tags in AWS.
_, err := s.EC2.CreateTags(input)
if err != nil {
return err
}
}
// If we have anything to remove
if len(remove) > 0 {
// Convert our remove map into an array of *ec2.Tag
removeTagsInput := mapToTags(remove)
// Create the DeleteTags input
input := &ec2.DeleteTagsInput{
Resources: []*string{resourceID},
Tags: removeTagsInput,
}
// Delete tags in AWS.
_, err := s.EC2.DeleteTags(input)
if err != nil {
return err
}
}
return nil
}
// fromSDKTypeToInstance takes a ec2.Instance and returns our v1.alpha1.Instance
// type. EC2 types are wrapped or converted to our own types here.
func fromSDKTypeToInstance(v *ec2.Instance) *v1alpha1.Instance {
i := &v1alpha1.Instance{
ID: aws.StringValue(v.InstanceId),
State: v1alpha1.InstanceState(*v.State.Name),
Type: aws.StringValue(v.InstanceType),
SubnetID: aws.StringValue(v.SubnetId),
ImageID: aws.StringValue(v.ImageId),
KeyName: v.KeyName,
PrivateIP: v.PrivateIpAddress,
PublicIP: v.PublicIpAddress,
ENASupport: v.EnaSupport,
EBSOptimized: v.EbsOptimized,
}
for _, sg := range v.SecurityGroups {
i.SecurityGroupIDs = append(i.SecurityGroupIDs, *sg.GroupId)
}
// TODO: Handle returned IAM instance profile, since we are currently
// using a string representing the name, but the InstanceProfile returned
// from the sdk only returns ARN and ID.
if len(v.Tags) > 0 {
i.Tags = tagsToMap(v.Tags)
}
if len(v.SecurityGroups) > 0 {
i.SecurityGroups = groupIdentifierToMap(v.SecurityGroups)
}
return i
}
// initControlPlaneScript returns the b64 encoded script to run on start up.
// The cert Must be CertPEM encoded and the key must be PrivateKeyPEM encoded
func initControlPlaneScript(caCert, caKey []byte) string {
// The script must start with #!. If it goes on the next line Dedent will start the script with a \n.
return fmt.Sprintf(`#!/usr/bin/env bash
mkdir -p /etc/kubernetes/pki
echo '%s' > /etc/kubernetes/pki/ca.crt
echo '%s' > /etc/kubernetes/pki/ca.key
cat >/tmp/kubeadm.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1alpha3
kind: InitConfiguration
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
EOF
kubeadm init --config /tmp/kubeadm.yaml
# Installation from https://docs.projectcalico.org/v3.2/getting-started/kubernetes/installation/calico
kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f https://docs.projectcalico.org/v3.2/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f https://docs.projectcalico.org/v3.2/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
`, caCert, caKey)
}
| 1 | 6,534 | Could we use just a normal string here? | kubernetes-sigs-cluster-api-provider-aws | go |
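A hedged sketch of the reviewer's suggestion above: accept a plain string parameter and convert at the SDK boundary, so callers stop threading *string through the call chain. The body is abbreviated from the original method above.
func (s *Service) InstanceIfExists(instanceID string) (*v1alpha1.Instance, error) {
	input := &ec2.DescribeInstancesInput{
		InstanceIds: aws.StringSlice([]string{instanceID}), // the SDK still wants []*string
	}
	out, err := s.EC2.DescribeInstances(input)
	// ... error handling identical to the original method ...
	if err != nil {
		return nil, err
	}
	if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 {
		return fromSDKTypeToInstance(out.Reservations[0].Instances[0]), nil
	}
	return nil, nil
}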
@@ -1,6 +1,8 @@
import { VNode } from './vnode';
import options from './options';
+import { iterableToArray, isFunction } from './util';
+const hasSymbolSupport = isFunction(Symbol) && Symbol.iterator;
const stack = [];
| 1 |
import { VNode } from './vnode';
import options from './options';
const stack = [];
const EMPTY_CHILDREN = [];
/** JSX/hyperscript reviver
* Benchmarks: https://esbench.com/bench/57ee8f8e330ab09900a1a1a0
* @see http://jasonformat.com/wtf-is-jsx
* @public
* @example
* /** @jsx h *\/
* import { render, h } from 'preact';
* render(<span>foo</span>, document.body);
*/
export function h(nodeName, attributes) {
let children, lastSimple, child, simple, i;
for (i=arguments.length; i-- > 2; ) {
stack.push(arguments[i]);
}
if (attributes && attributes.children) {
if (!stack.length) stack.push(attributes.children);
delete attributes.children;
}
while (stack.length) {
if ((child = stack.pop()) instanceof Array) {
for (i=child.length; i--; ) stack.push(child[i]);
}
else if (child!=null && child!==true && child!==false) {
if (typeof child=='number') child = String(child);
simple = typeof child=='string';
if (simple && lastSimple) {
children[children.length-1] += child;
}
else {
(children || (children = [])).push(child);
lastSimple = simple;
}
}
}
let p = new VNode(nodeName, attributes || undefined, children || EMPTY_CHILDREN);
// if a "vnode hook" is defined, pass every created VNode to it
if (options.vnode) options.vnode(p);
return p;
}
| 1 | 10,327 | `isFunction(Symbol)` triggers a `ReferenceError` if Symbol is undefined. Using `typeof Symbol === 'function'` is OK however. | preactjs-preact | js |
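A two-line demonstration of the point above (a sketch, not the shipped fix): a bare reference to an undeclared global throws, while typeof on it is always safe.
// isFunction(Symbol) -> ReferenceError on engines where Symbol was never declared
const hasSymbolSupport = typeof Symbol === 'function' && Symbol.iterator; // safe on all engines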
@@ -140,6 +140,10 @@ void nano::election_scheduler::run ()
election->transition_active ();
}
}
+ else
+ {
+ lock.unlock ();
+ }
notify ();
lock.lock ();
}
| 1 |
#include <nano/node/election_scheduler.hpp>
#include <nano/node/node.hpp>
nano::election_scheduler::election_scheduler (nano::node & node) :
node{ node },
stopped{ false },
thread{ [this] () { run (); } }
{
}
nano::election_scheduler::~election_scheduler ()
{
stop ();
thread.join ();
}
void nano::election_scheduler::manual (std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void (std::shared_ptr<nano::block> const &)> const & confirmation_action_a)
{
nano::lock_guard<nano::mutex> lock{ mutex };
manual_queue.push_back (std::make_tuple (block_a, previous_balance_a, election_behavior_a, confirmation_action_a));
notify ();
}
void nano::election_scheduler::activate (nano::account const & account_a, nano::transaction const & transaction)
{
debug_assert (!account_a.is_zero ());
nano::account_info account_info;
if (!node.store.account.get (transaction, account_a, account_info))
{
nano::confirmation_height_info conf_info;
node.store.confirmation_height.get (transaction, account_a, conf_info);
if (conf_info.height < account_info.block_count)
{
debug_assert (conf_info.frontier != account_info.head);
auto hash = conf_info.height == 0 ? account_info.open_block : node.store.block.successor (transaction, conf_info.frontier);
auto block = node.store.block.get (transaction, hash);
debug_assert (block != nullptr);
if (node.ledger.dependents_confirmed (transaction, *block))
{
nano::lock_guard<nano::mutex> lock{ mutex };
priority.push (account_info.modified, block);
notify ();
}
}
}
}
void nano::election_scheduler::stop ()
{
nano::unique_lock<nano::mutex> lock{ mutex };
stopped = true;
notify ();
}
void nano::election_scheduler::flush ()
{
nano::unique_lock<nano::mutex> lock{ mutex };
condition.wait (lock, [this] () {
return stopped || empty_locked () || node.active.vacancy () <= 0;
});
}
void nano::election_scheduler::notify ()
{
condition.notify_all ();
}
std::size_t nano::election_scheduler::size () const
{
nano::lock_guard<nano::mutex> lock{ mutex };
return priority.size () + manual_queue.size ();
}
bool nano::election_scheduler::empty_locked () const
{
return priority.empty () && manual_queue.empty ();
}
bool nano::election_scheduler::empty () const
{
nano::lock_guard<nano::mutex> lock{ mutex };
return empty_locked ();
}
std::size_t nano::election_scheduler::priority_queue_size () const
{
return priority.size ();
}
bool nano::election_scheduler::priority_queue_predicate () const
{
return node.active.vacancy () > 0 && !priority.empty ();
}
bool nano::election_scheduler::manual_queue_predicate () const
{
return !manual_queue.empty ();
}
bool nano::election_scheduler::overfill_predicate () const
{
return node.active.vacancy () < 0;
}
void nano::election_scheduler::run ()
{
nano::thread_role::set (nano::thread_role::name::election_scheduler);
nano::unique_lock<nano::mutex> lock{ mutex };
while (!stopped)
{
condition.wait (lock, [this] () {
return stopped || priority_queue_predicate () || manual_queue_predicate () || overfill_predicate ();
});
debug_assert ((std::this_thread::yield (), true)); // Introduce some random delay in debug builds
if (!stopped)
{
if (overfill_predicate ())
{
lock.unlock ();
node.active.erase_oldest ();
}
else if (manual_queue_predicate ())
{
auto const [block, previous_balance, election_behavior, confirmation_action] = manual_queue.front ();
manual_queue.pop_front ();
lock.unlock ();
nano::unique_lock<nano::mutex> lock2 (node.active.mutex);
node.active.insert_impl (lock2, block, previous_balance, election_behavior, confirmation_action);
}
else if (priority_queue_predicate ())
{
auto block = priority.top ();
priority.pop ();
lock.unlock ();
std::shared_ptr<nano::election> election;
nano::unique_lock<nano::mutex> lock2 (node.active.mutex);
election = node.active.insert_impl (lock2, block).election;
if (election != nullptr)
{
election->transition_active ();
}
}
notify ();
lock.lock ();
}
}
}
| 1 | 16,988 | Is there an else clause (with a lock.unlock ()) missing here as well? If we think we do not need an else clause, then let's add an else clause containing assert zero. | nanocurrency-nano-node | cpp
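A hedged sketch of what the reviewer is asking for, in the style of the file above: every branch releases the lock before notify (), and an unexpected wakeup asserts instead of silently holding the mutex. The abbreviated branches stand in for the bodies shown in run () above.
if (overfill_predicate ())
{
	lock.unlock ();
	node.active.erase_oldest ();
}
else if (manual_queue_predicate () || priority_queue_predicate ())
{
	// ... pop under the lock, then unlock and insert, as above ...
	lock.unlock ();
}
else
{
	lock.unlock ();
	debug_assert (false); // unexpected wakeup: no work and no overfill
}
notify ();
lock.lock ();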
@@ -189,7 +189,7 @@ func dispense(w http.ResponseWriter, r *http.Request) {
return
}
- tx, err := c.SendPaymentFromUnencryptedWallet(cfg.Source, target, uint64(cfg.Fee), uint64(cfg.Amount), nil)
+ tx, err := c.SendPaymentFromUnencryptedWallet(cfg.Source, target, uint64(cfg.Fee), uint64(cfg.Amount), [32]byte{}, nil)
if err != nil {
http.Error(w, fmt.Sprintf("failed to dispense money - %v", err), http.StatusInternalServerError)
return
| 1 |
// Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
// "bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"text/template"
"time"
"golang.org/x/crypto/acme/autocert"
"github.com/algorand/go-algorand/libgoal"
)
var configFile = flag.String("config", "", "JSON configuration file")
var autocertDir = flag.String("autocert", "", "Autocert cache directory")
var listenPort = flag.Int("port", 443, "Port to listen for incoming connections")
var httpsCert = flag.String("cert", "", "https certificate.pem file; mutually exclusive with autocert")
var httpsKey = flag.String("key", "", "https key.pem file; mutually exclusive with autocert")
var configMap map[string]dispenserSiteConfig
var client map[string]libgoal.Client
type recaptchaResponse struct {
Success bool `json:"success"`
ChallengeTS time.Time `json:"challenge_ts"`
Hostname string `json:"hostname"`
ErrorCodes []string `json:"error-codes"`
}
type dispenserSiteConfig struct {
RecaptchaSiteKey string `json:"recaptcha_sitekey"`
RecaptchaSecret string `json:"recaptcha_secret"`
Amount int `json:"amount"`
Fee int `json:"fee"`
Source string `json:"wallet"`
DataDir string `json:"data_dir"`
ExeDir string `json:"exe_dir"`
topPage string
}
const topPageTemplate = `
<html>
<head>
<title>Algorand dispenser</title>
<script src='https://www.google.com/recaptcha/api.js'></script>
<script
src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
crossorigin="anonymous"></script>
<script>
function onload() {
$('#dispense').click(function (e) {
var recaptcha = grecaptcha.getResponse();
var target = $('#target').val();
$('#status').html('Sending request..');
var req = $.post('/dispense', {
recaptcha: recaptcha,
target: target,
}, function (data) {
$('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
}).fail(function () {
$('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
});
});
}
</script>
</head>
<body onload="onload()">
<h1>Algorand dispenser</h1>
<div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}"></div>
<div>
<input id="target" placeholder="target address">
<button id="dispense">Dispense</button>
</div>
<div>
Status: <span id="status">
</div>
</body>
</html>
`
func getConfig(r *http.Request) dispenserSiteConfig {
return configMap[r.Host]
}
func handler(w http.ResponseWriter, r *http.Request) {
cfg := getConfig(r)
w.Header().Set("Content-Type", "text/html; charset=utf-8")
tmpl, err := template.New("top").Parse(topPageTemplate)
if err != nil {
log.Printf("Error parsing top page template: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = tmpl.Execute(w, cfg)
if err != nil {
log.Printf("Error executing template: %v\n", err)
}
}
func (cfg dispenserSiteConfig) checkRecaptcha(remoteip, response string) (r recaptchaResponse, err error) {
resp, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify",
url.Values{"secret": {cfg.RecaptchaSecret},
"response": {response},
"remoteip": {remoteip}})
if err != nil {
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = json.Unmarshal(body, &r)
return
}
func dispense(w http.ResponseWriter, r *http.Request) {
cfg := getConfig(r)
err := r.ParseForm()
if err != nil {
log.Printf("Error parsing form: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
recaptcha := r.Form.Get("recaptcha")
recap, err := cfg.checkRecaptcha(r.RemoteAddr, recaptcha)
if err != nil {
log.Printf("Error checking RECAPTCHA: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if !recap.Success {
log.Printf("RECAPTCHA failed\n")
http.Error(w, "RECAPTCHA failed", http.StatusForbidden)
return
}
targets := r.Form["target"]
if len(targets) != 1 {
log.Printf("Corrupted target argument\n")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
target := targets[0]
c, ok := client[r.Host]
if !ok {
http.Error(w, fmt.Sprintf("didn't find client for host %s", r.Host), http.StatusBadRequest)
return
}
tx, err := c.SendPaymentFromUnencryptedWallet(cfg.Source, target, uint64(cfg.Fee), uint64(cfg.Amount), nil)
if err != nil {
http.Error(w, fmt.Sprintf("failed to dispense money - %v", err), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(tx.ID().String())
}
func main() {
flag.Parse()
http.HandleFunc("/", handler)
http.HandleFunc("/dispense", dispense)
tmpl, err := template.New("top").Parse(topPageTemplate)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing top page template: %v\n", err)
os.Exit(1)
}
configText, err := ioutil.ReadFile(*configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read config file (%s): %v\n", *configFile, err)
os.Exit(1)
}
configMap = make(map[string]dispenserSiteConfig)
err = json.Unmarshal(configText, &configMap)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot load config file (%s): %v\n", *configFile, err)
os.Exit(1)
}
client = make(map[string]libgoal.Client)
var hosts []string
for h, cfg := range configMap {
// Make a cache dir for wallet handle tokens
cacheDir, err := ioutil.TempDir("", "dispenser")
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot make temp dir: %v\n", err)
os.Exit(1)
}
// Init libgoal Client
c, err := libgoal.MakeClientWithBinDir(cfg.ExeDir, cfg.DataDir, cacheDir, libgoal.FullClient)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot init libgoal %v\n", err)
os.Exit(1)
}
client[h] = c
hosts = append(hosts, h)
var buf strings.Builder
err = tmpl.Execute(&buf, cfg)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot execute template for site %s: %v\n", h, err)
os.Exit(1)
}
cfg.topPage = buf.String()
configMap[h] = cfg
}
useAutocert := false
if *autocertDir != "" || *httpsCert == "" || *httpsKey == "" {
useAutocert = true
}
if useAutocert {
cacheDir := *autocertDir
if cacheDir == "" {
cacheDir = os.Getenv("HOME") + "/.autocert"
}
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(hosts...),
Cache: autocert.DirCache(cacheDir),
}
go http.ListenAndServe(":80", m.HTTPHandler(nil))
log.Fatal(http.Serve(m.Listener(), nil))
} else {
log.Fatal(http.ListenAndServeTLS(fmt.Sprintf(":%d", *listenPort), *httpsCert, *httpsKey, nil))
}
}
| 1 | 36,431 | do we foresee `32` ever changing? (as well as its use in error checking in `clerk.go`) if "no" this is fine, if "yes" it's probably still fine | algorand-go-algorand | go |
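A hedged sketch of the concern above: naming the 32 keeps this call site and any size checks (e.g., in clerk.go) in sync if the lease width ever changes. leaseSize is an invented name for illustration, not an existing constant in the codebase.
const leaseSize = 32 // assumption: mirrors the [32]byte lease parameter above

var lease [leaseSize]byte // identical type to [32]byte while the constant is 32
tx, err := c.SendPaymentFromUnencryptedWallet(cfg.Source, target, uint64(cfg.Fee), uint64(cfg.Amount), lease, nil)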
@@ -19,13 +19,14 @@ package org.openqa.selenium.remote;
import com.google.common.collect.ImmutableMap;
+import org.openqa.selenium.DeviceRotation;
import org.openqa.selenium.Rotatable;
import org.openqa.selenium.ScreenOrientation;
import java.lang.reflect.Method;
public class AddRotatable implements AugmenterProvider {
-
+
public Class<?> getDescribedInterface() {
return Rotatable.class;
}
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.Rotatable;
import org.openqa.selenium.ScreenOrientation;
import java.lang.reflect.Method;
public class AddRotatable implements AugmenterProvider {
public Class<?> getDescribedInterface() {
return Rotatable.class;
}
public InterfaceImplementation getImplementation(Object value) {
return new InterfaceImplementation() {
public Object invoke(ExecuteMethod executeMethod, Object self, Method method, Object... args) {
if ("rotate".equals(method.getName())) {
return executeMethod.execute(DriverCommand.SET_SCREEN_ORIENTATION,
ImmutableMap.of("orientation", args[0]));
} else if ("getOrientation".equals(method.getName())) {
return ScreenOrientation.valueOf((String) executeMethod.execute(
DriverCommand.GET_SCREEN_ORIENTATION, null));
}
return null;
}
};
}
}
| 1 | 13,511 | you don't need to create an enum for this, Java allows you to switch on a string ;) (since Java 7?) | SeleniumHQ-selenium | py
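What the review comment above suggests, sketched against the invoke body from this file: since Java 7 the method name can drive a switch directly, with no enum needed.
switch (method.getName()) {
  case "rotate":
    return executeMethod.execute(DriverCommand.SET_SCREEN_ORIENTATION,
        ImmutableMap.of("orientation", args[0]));
  case "getOrientation":
    return ScreenOrientation.valueOf((String) executeMethod.execute(
        DriverCommand.GET_SCREEN_ORIENTATION, null));
  default:
    return null;
}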
@@ -35,6 +35,12 @@ export default AuthenticatedRoute.extend(styleBody, CurrentUserSettings, Paginat
});
}
+ // we need to load the roles into ember cache
+ // invites return role_id only and we do not offer a /role/:id endpoint
+ this.get('store').query('role', {permissions: 'assign'}).then((roles) => {
+ modelPromises.roles = roles;
+ });
+
return RSVP.hash(modelPromises);
});
}, | 1 | import AuthenticatedRoute from 'ghost-admin/routes/authenticated';
import CurrentUserSettings from 'ghost-admin/mixins/current-user-settings';
import PaginationMixin from 'ghost-admin/mixins/pagination';
import styleBody from 'ghost-admin/mixins/style-body';
import RSVP from 'rsvp';
import {isBlank} from 'ember-utils';
export default AuthenticatedRoute.extend(styleBody, CurrentUserSettings, PaginationMixin, {
titleToken: 'Team',
classNames: ['view-team'],
paginationModel: 'user',
paginationSettings: {
status: 'all',
limit: 20
},
model() {
return this.get('session.user').then((user) => {
let modelPromises = {
users: this.loadFirstPage().then(() => {
return this.store.filter('user', (user) => {
return !user.get('isNew') && !isBlank(user.get('status'));
});
})
};
// authors do not have permission to hit the invites endpoint
if (!user.get('isAuthor')) {
modelPromises.invites = this.store.query('invite', {limit: 'all'}).then(() => {
return this.store.filter('invite', (invite) => {
return !invite.get('isNew');
});
});
}
return RSVP.hash(modelPromises);
});
},
setupController(controller, models) {
controller.setProperties(models);
},
actions: {
reload() {
this.refresh();
}
}
});
| 1 | 7,647 | This should be `modelPromises.roles = this.get('store').query('role', ...` and the promise body should just `return roles;`. This is because the `model()` hook will pause for the returned promises to be resolved before continuing so when returning `RSVP.hash` you end up with a hash containing the values returned from the promises so the controller and everything else relying on the `model` object doesn't need to worry about things being async. | TryGhost-Admin | js |
@@ -0,0 +1,19 @@
+const landmarks = axe.commons.aria.getRolesByType('landmark');
+const sectioning = ['article', 'aside', 'main', 'navigation', 'section'];
+const nodeIsHeader = node.tagName.toLowerCase() === 'header' && node.getAttribute('role') !== 'banner';
+var parent = axe.commons.dom.getComposedParent(node);
+
+while (parent){
+ var role = parent.getAttribute('role');
+ if (!role && (parent.tagName.toLowerCase() !== 'form')){
+ role = axe.commons.aria.implicitRole(parent);
+ }
+ if (role && nodeIsHeader && sectioning.includes(role)){
+ return true;
+ }
+ if (role && landmarks.includes(role)){
+ return false;
+ }
+ parent = axe.commons.dom.getComposedParent(parent);
+}
+return true; | 1 | 1 | 12,001 | This is basically a repeat of `main-is-top-level.js`. I suggest having these 3 checks point to the same evaluate file. You can change what element they test through the rule selector as well as the check.json options property. So you would have `main-is-top-level.json`, `banner-is-top-level.json` and `contentinfo-is-top-level.json` each point to `landmark-is-top-level.js`, and pass in a different `options` object which tells it what element you're trying to test. | dequelabs-axe-core | js |
|
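A hedged sketch of the reviewer's suggestion above: all three checks point at one shared evaluate file and pass the landmark under test through options. The file names follow the comment; the exact metadata schema and option name are assumptions.
// banner-is-top-level.json (sketch):
// {
//   "id": "banner-is-top-level",
//   "evaluate": "landmark-is-top-level.js",
//   "options": { "role": "banner" }
// }
// main-is-top-level.json and contentinfo-is-top-level.json would differ only in "options",
// and landmark-is-top-level.js would read the element to test from options.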
@@ -63,13 +63,13 @@ public class PackageListeners {
}
synchronized void packagesUpdated(List<PackageLoader.Package> pkgs) {
- MDCLoggingContext.setCore(core);
+ if(core != null) MDCLoggingContext.setCore(core);
try {
for (PackageLoader.Package pkgInfo : pkgs) {
invokeListeners(pkgInfo);
}
} finally {
- MDCLoggingContext.clear();
+ if(core != null) MDCLoggingContext.clear();
}
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.pkg;
import java.lang.invoke.MethodHandles;
import java.lang.ref.Reference;
import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.logging.MDCLoggingContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PackageListeners {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String PACKAGE_VERSIONS = "PKG_VERSIONS";
private SolrCore core;
public PackageListeners(SolrCore core) {
this.core = core;
}
// this registry only keeps a weak reference because it does not want to
// cause a memory leak if the listener forgets to unregister itself
private List<Reference<Listener>> listeners = new ArrayList<>();
public synchronized void addListener(Listener listener) {
listeners.add(new SoftReference<>(listener));
}
public synchronized void removeListener(Listener listener) {
Iterator<Reference<Listener>> it = listeners.iterator();
while (it.hasNext()) {
Reference<Listener> ref = it.next();
Listener pkgListener = ref.get();
if (pkgListener == null || pkgListener == listener) {
it.remove();
}
}
}
synchronized void packagesUpdated(List<PackageLoader.Package> pkgs) {
MDCLoggingContext.setCore(core);
try {
for (PackageLoader.Package pkgInfo : pkgs) {
invokeListeners(pkgInfo);
}
} finally {
MDCLoggingContext.clear();
}
}
private synchronized void invokeListeners(PackageLoader.Package pkg) {
for (Reference<Listener> ref : listeners) {
Listener listener = ref.get();
if(listener == null) continue;
if (listener.packageName() == null || listener.packageName().equals(pkg.name())) {
listener.changed(pkg);
}
}
}
public List<Listener> getListeners() {
List<Listener> result = new ArrayList<>();
for (Reference<Listener> ref : listeners) {
Listener l = ref.get();
if (l != null) {
result.add(l);
}
}
return result;
}
public interface Listener {
/** Name of the package or null to listen to all package changes
*/
String packageName();
PluginInfo pluginInfo();
void changed(PackageLoader.Package pkg);
PackageLoader.Package.Version getPackageVersion();
}
}
| 1 | 33,752 | Why do we need this check? setCore already handles nulls. | apache-lucene-solr | java |
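A hedged sketch of the reviewer's point above: if MDCLoggingContext.setCore is already null-safe (the body below is an assumption about that class, not its real source), then the if(core != null) guards added to packagesUpdated are redundant.
public static void setCore(SolrCore core) {
  if (core == null) return; // null-safe here, so callers need no guard
  // ... push the core name onto the logging MDC ...
}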
@@ -121,7 +121,7 @@ def _inject(target_type, inject_type):
# Inject all the methods from the hierarchy:
setattr(target_type, "__getattr__", inject_type.__getattr__)
setattr(target_type, "__getitem__", inject_type.__getitem__)
- for attr in ["__iter__", "__len__", "__invert__", "__setitem__"]:
+ for attr in ["__iter__", "__len__", "__invert__", "__setitem__", "__dir__"]:
if hasattr(inject_type, attr):
setattr(target_type, attr, inject_type.__dict__[attr])
for t in mro:
| 1 |
import pyspark.sql.dataframe as df
import pyspark.sql.column as col
import pyspark.sql.functions as F
import pyspark
from decorator import decorator
import types
import logging
from .structures import *
from . import namespace
logger = logging.getLogger('spark')
_TOUCHED_TEST = "_pandas_updated"
def patch_spark():
"""
This function monkey patches Spark to make PySpark's behavior similar to Pandas.
See the readme documentation for an exhaustive list of the changes performed by this function.
Once this function is called, the behavior cannot be reverted.
"""
# Directly patching the base does not work because DataFrame inherits from object
# (known python limitation)
# NormalDF = pyspark.sql.dataframe.DataFrame
# PatchedDF = type("DataFrame0", (PandasLikeDataFrame, object), dict(NormalDF.__dict__))
# pyspark.sql.dataframe.DataFrame = PatchedDF
# pyspark.sql.DataFrame = PatchedDF
# Just going to update the dictionary
_inject(df.DataFrame, PandasLikeDataFrame)
_inject(df.Column, PandasLikeSeries)
# Override in all cases these methods to prevent any dispatching.
df.Column.__repr__ = PandasLikeSeries.__repr__
df.Column.__str__ = PandasLikeSeries.__str__
# Replace the creation of the operators in columns
_wrap_operators()
# Wrap all the functions in the standard libraries
_wrap_functions()
# Inject a few useful functions.
pyspark.read_csv = namespace.read_csv
pyspark.read_parquet = namespace.read_parquet
pyspark.to_datetime = namespace.to_datetime
@decorator
def wrap_column_function(f, *args, **kwargs):
# Call the function first
# print("wrap_column_function:calling {} on args={}, kwargs={}".format(f, args, kwargs))
res = f(*args, **kwargs)
if isinstance(res, col.Column):
# print("res is a column")
# Need to track where this column is coming from
all_inputs = list(args) + list(kwargs.values())
def ref_df(x):
if isinstance(x, df.DataFrame):
return x
if isinstance(x, df.Column):
if hasattr(x, "_spark_ref_dataframe"):
return x._spark_ref_dataframe
else:
logger.warning("Found a column without reference: {}".format(str(x)))
return None
all_col_inputs = [ref_df(c) for c in all_inputs]
# print("wrap_column_function:all_col_inputs", all_col_inputs)
all_df_inputs = list(dict([(id(f), f) for f in all_col_inputs if f]).items())
# print("wrap_column_function:all_df_inputs", all_df_inputs)
if len(all_df_inputs) > 1:
logger.warning("Too many anchors to conclude")
elif not all_df_inputs:
logger.warning("Could not find anchors")
else:
(_, df_ref) = all_df_inputs[0]
res._spark_ref_dataframe = df_ref
return res
def _wrap_operators():
attrs = ["__neg__", "__add__", "__sub__", "__mul__", "__div__", "__truediv__", "__mod__",
"__eq__", "__ne__", "__lt__", "__le__", "__ge__", "__gt__"]
if hasattr(col.Column, _TOUCHED_TEST):
return
for attr in attrs:
oldfun = getattr(col.Column, attr)
fun = wrap_column_function(oldfun)
setattr(col.Column, attr, fun)
setattr(col.Column, _TOUCHED_TEST, "")
def _wrap_functions():
all_funs = F.__all__
if hasattr(F, _TOUCHED_TEST):
return
for fname in all_funs:
if fname in ('pandas_udf',):
continue
oldfun = getattr(F, fname)
if isinstance(oldfun, types.FunctionType):
fun = wrap_column_function(oldfun)
setattr(F, fname, fun)
setattr(F, _TOUCHED_TEST, "")
def _inject(target_type, inject_type):
# Make sure to resolve the base classes too.
mro = list(inject_type.__mro__)
mro.reverse()
    logger.debug("Resolved MRO for injection: %s", mro)
# Keep a duplicate of all the existing methods:
setattr(target_type, "_spark_getattr", target_type.__getattr__)
setattr(target_type, "_spark_getitem", target_type.__getitem__)
for (key, fun) in list(target_type.__dict__.items()):
# Skip the system attributes
if key.startswith("__") or key.startswith("_spark_"):
continue
setattr(target_type, "_spark_" + key, fun)
# Inject all the methods from the hierarchy:
setattr(target_type, "__getattr__", inject_type.__getattr__)
setattr(target_type, "__getitem__", inject_type.__getitem__)
for attr in ["__iter__", "__len__", "__invert__", "__setitem__"]:
if hasattr(inject_type, attr):
setattr(target_type, attr, inject_type.__dict__[attr])
for t in mro:
if t == object:
continue
for (key, fun) in list(t.__dict__.items()):
# Skip the system attributes
if key.startswith("__") or key.startswith("_spark_"):
continue
setattr(target_type, key, fun)
| 1 | 8,011 | good catch, thanks. This is going to make the user experience much better. | databricks-koalas | py |
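The practical payoff of injecting __dir__ is discoverability: dir() and IPython tab completion consult it. A self-contained Python sketch of the mechanism, using illustrative names rather than the real koalas types:

class Injected:
    def pandas_like_method(self):
        return "hello"

    def __dir__(self):
        # Merge the injected attribute names into the default listing so
        # dir() and tab completion can discover them.
        return sorted(set(object.__dir__(self)) | {"pandas_like_method"})

obj = Injected()
assert "pandas_like_method" in dir(obj)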
@@ -1,4 +1,4 @@
-import { options } from 'preact';
+import { Component, options } from 'preact';
/** @type {number} */
let currentIndex; | 1 | import { options } from 'preact';
/** @type {number} */
let currentIndex;
/** @type {import('./internal').Component} */
let currentComponent;
/** @type {Array<import('./internal').Component>} */
let afterPaintEffects = [];
let oldBeforeRender = options.render;
options.render = vnode => {
if (oldBeforeRender) oldBeforeRender(vnode);
currentComponent = vnode._component;
currentIndex = 0;
if (!currentComponent.__hooks) return;
currentComponent.__hooks._pendingEffects.forEach(invokeEffect);
currentComponent.__hooks._pendingEffects = [];
};
let oldAfterDiff = options.diffed;
options.diffed = vnode => {
if (oldAfterDiff) oldAfterDiff(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (!hooks) return;
// TODO: Consider moving to a global queue. May need to move
// this to the `commit` option
hooks._pendingLayoutEffects.forEach(invokeEffect);
hooks._pendingLayoutEffects = [];
};
let oldBeforeUnmount = options.unmount;
options.unmount = vnode => {
if (oldBeforeUnmount) oldBeforeUnmount(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (!hooks) return;
hooks._list.forEach(hook => hook._cleanup && hook._cleanup());
};
/**
* Get a hook's state from the currentComponent
* @param {number} index The index of the hook to get
* @returns {import('./internal').HookState}
*/
function getHookState(index) {
// Largely inspired by:
// * https://github.com/michael-klein/funcy.js/blob/master/src/hooks/core_hooks.mjs
// * https://github.com/michael-klein/funcy.js/blob/master/src/lib/renderer.mjs
// Other implementations to look at:
// * https://codesandbox.io/s/mnox05qp8
const hooks = currentComponent.__hooks || (currentComponent.__hooks = { _list: [], _pendingEffects: [], _pendingLayoutEffects: [] });
if (index >= hooks._list.length) {
hooks._list.push({});
}
return hooks._list[index];
}
export function useState(initialState) {
return useReducer(invokeOrReturn, initialState);
}
export function useReducer(reducer, initialState, init) {
/** @type {import('./internal').ReducerHookState} */
const hookState = getHookState(currentIndex++);
if (hookState._component == null) {
hookState._component = currentComponent;
hookState._value = [
init == null ? invokeOrReturn(null, initialState) : init(initialState),
action => {
hookState._value[0] = reducer(hookState._value[0], action);
hookState._component.setState({});
}
];
}
return hookState._value;
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent.__hooks._pendingEffects.push(state);
afterPaint(currentComponent);
}
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useLayoutEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent.__hooks._pendingLayoutEffects.push(state);
}
}
export function useRef(initialValue) {
const state = getHookState(currentIndex++);
if (state._value == null) {
state._value = { current: initialValue };
}
return state._value;
}
/**
* @param {() => any} callback
* @param {any[]} args
*/
export function useMemo(callback, args) {
/** @type {import('./internal').MemoHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._args = args;
state._callback = callback;
return state._value = callback();
}
return state._value;
}
/**
* @param {() => void} callback
* @param {any[]} args
*/
export function useCallback(callback, args) {
return useMemo(() => callback, args);
}
/**
* @param {import('./internal').PreactContext} context
*/
export function useContext(context) {
const provider = currentComponent.context[context._id];
if (provider == null) return context._defaultValue;
const state = getHookState(currentIndex++);
if (state._value == null) {
state._value = true;
provider.sub(currentComponent);
}
return provider.props.value;
}
// Note: if someone used Component.debounce = requestAnimationFrame,
// then effects will ALWAYS run on the NEXT frame instead of the current one, incurring a ~16ms delay.
// Perhaps this is not such a big deal.
/**
* Invoke a component's pending effects after the next frame renders
* @type {(component: import('./internal').Component) => void}
*/
let afterPaint = () => {};
/**
* After paint effects consumer.
*/
function flushAfterPaintEffects() {
afterPaintEffects.forEach(component => {
component._afterPaintQueued = false;
if (!component._parentDom) return;
component.__hooks._pendingEffects.forEach(invokeEffect);
component.__hooks._pendingEffects = [];
});
afterPaintEffects = [];
}
function scheduleFlushAfterPaint() {
setTimeout(flushAfterPaintEffects, 0);
}
if (typeof window !== 'undefined') {
afterPaint = (component) => {
if (!component._afterPaintQueued && (component._afterPaintQueued = true) && afterPaintEffects.push(component) === 1) {
requestAnimationFrame(scheduleFlushAfterPaint);
}
};
}
/**
* Invoke a Hook's effect
* @param {import('./internal').EffectHookState} hook
*/
function invokeEffect(hook) {
if (hook._cleanup) hook._cleanup();
const result = hook._value();
if (typeof result === 'function') hook._cleanup = result;
}
function argsChanged(oldArgs, newArgs) {
return oldArgs == null || newArgs.some((arg, index) => arg !== oldArgs[index]);
}
function invokeOrReturn(arg, f) {
return typeof f === 'function' ? f(arg) : f;
}
| 1 | 12,523 | TODO: remove this unused import | preactjs-preact | js |
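The cleanup the TODO asks for is a one-line revert, sketched here: Component is never referenced in this module, so the named import can be dropped again.

// Suggested cleanup (sketch):
import { options } from 'preact';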
@@ -591,7 +591,7 @@ func (s *HandlerTestSuite) TestValidateJWTSVID() {
},
workloadUpdate: &cache.WorkloadUpdate{},
code: codes.InvalidArgument,
- msg: "token contains an invalid number of segments",
+ msg: "square/go-jose: compact JWS format must have three parts",
},
{
name: "validated by our trust domain bundle", | 1 | package workload
import (
"context"
"crypto/x509"
"encoding/base64"
"testing"
"time"
"github.com/golang/mock/gomock"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/go-spiffe/proto/spiffe/workload"
"github.com/spiffe/spire/pkg/agent/client"
"github.com/spiffe/spire/pkg/agent/manager/cache"
"github.com/spiffe/spire/pkg/common/bundleutil"
"github.com/spiffe/spire/pkg/common/jwtsvid"
"github.com/spiffe/spire/pkg/common/peertracker"
"github.com/spiffe/spire/pkg/common/pemutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/fakes/fakeworkloadattestor"
mock_manager "github.com/spiffe/spire/test/mock/agent/manager"
mock_cache "github.com/spiffe/spire/test/mock/agent/manager/cache"
mock_telemetry "github.com/spiffe/spire/test/mock/common/telemetry"
mock_workload "github.com/spiffe/spire/test/mock/proto/api/workload"
"github.com/spiffe/spire/test/spiretest"
"github.com/spiffe/spire/test/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)
var (
jwtSigningKey, _ = pemutil.ParseSigner([]byte(`-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgGZx/yLVskGyXAyIT
uDe7PI1X4Dt1boMWfysKPyOJeMuhRANCAARzgo1R4J4xtjGpmGFNl2KADaxDpgx3
KfDQqPUcYWUMm2JbwFyHxQfhJfSf+Mla5C4FnJG6Ksa7pWjITPf5KbHi
-----END PRIVATE KEY-----
`))
)
func TestHandler(t *testing.T) {
spiretest.Run(t, new(HandlerTestSuite))
}
type HandlerTestSuite struct {
spiretest.Suite
h *Handler
ctrl *gomock.Controller
attestor *fakeworkloadattestor.WorkloadAttestor
manager *mock_manager.MockManager
metrics *mock_telemetry.MockMetrics
}
func (s *HandlerTestSuite) SetupTest() {
mockCtrl := gomock.NewController(s.T())
log, _ := test.NewNullLogger()
s.attestor = fakeworkloadattestor.New()
s.manager = mock_manager.NewMockManager(mockCtrl)
s.metrics = mock_telemetry.NewMockMetrics(mockCtrl)
catalog := fakeagentcatalog.New()
catalog.SetWorkloadAttestors(fakeagentcatalog.WorkloadAttestor("fake", s.attestor))
h := &Handler{
Manager: s.manager,
Catalog: catalog,
Log: log,
Metrics: s.metrics,
}
s.h = h
s.ctrl = mockCtrl
}
func (s *HandlerTestSuite) TearDownTest() {
s.ctrl.Finish()
}
func (s *HandlerTestSuite) TestFetchX509SVID() {
// Without the security header
stream := mock_workload.NewMockSpiffeWorkloadAPI_FetchX509SVIDServer(s.ctrl)
stream.EXPECT().Context().Return(context.Background())
err := s.h.FetchX509SVID(nil, stream)
s.Assert().Error(err)
// Without PID data
ctx := makeContext(0)
stream.EXPECT().Context().Return(ctx)
err = s.h.FetchX509SVID(nil, stream)
s.Assert().Error(err)
ctx, cancel := context.WithCancel(makeContext(1))
defer cancel()
selectors := []*common.Selector{{Type: "foo", Value: "bar"}}
subscriber := mock_cache.NewMockSubscriber(s.ctrl)
subscription := make(chan *cache.WorkloadUpdate)
subscriber.EXPECT().Updates().Return(subscription).AnyTimes()
subscriber.EXPECT().Finish()
result := make(chan error, 1)
stream.EXPECT().Context().Return(ctx).AnyTimes()
s.attestor.SetSelectors(1, selectors)
s.manager.EXPECT().SubscribeToCacheChanges(cache.Selectors{selectors[0]}).Return(subscriber)
stream.EXPECT().Send(gomock.Any())
statusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
setupMetricsCommonExpectations(s.metrics, len(selectors), statusLabel)
labels := []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.X509},
statusLabel,
}
s.metrics.EXPECT().SetGaugeWithLabels(
[]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID, telemetry.TTL},
gomock.Any(),
[]telemetry.Label{
{Name: telemetry.SPIFFEID, Value: "spiffe://example.org/foo"},
})
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID, telemetry.ElapsedTime}, gomock.Any(), labels)
s.metrics.EXPECT().MeasureSince([]string{telemetry.WorkloadAPI, telemetry.SVIDResponseLatency, telemetry.Fetch}, gomock.Any())
go func() { result <- s.h.FetchX509SVID(nil, stream) }()
// Make sure it's still running...
select {
case err := <-result:
s.T().Errorf("hander exited immediately: %v", err)
case <-time.NewTimer(1 * time.Millisecond).C:
}
select {
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("timeout sending update to workload handler")
case subscription <- s.workloadUpdate():
}
cancel()
select {
case err := <-result:
s.Assert().NoError(err)
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("workload handler hung, shutdown timer exceeded")
}
}
func (s *HandlerTestSuite) TestSendX509Response() {
stream := mock_workload.NewMockSpiffeWorkloadAPI_FetchX509SVIDServer(s.ctrl)
emptyUpdate := new(cache.WorkloadUpdate)
stream.EXPECT().Send(gomock.Any()).Times(0)
labels := []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.X509},
{Name: telemetry.Status, Value: codes.PermissionDenied.String()},
}
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID, telemetry.ElapsedTime}, gomock.Any(), labels)
err := s.h.sendX509SVIDResponse(emptyUpdate, stream, s.h.Metrics, []*common.Selector{})
s.Assert().Error(err)
resp, err := s.h.composeX509SVIDResponse(s.workloadUpdate())
s.Require().NoError(err)
stream.EXPECT().Send(resp)
statusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
labels = []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.X509},
statusLabel,
}
s.metrics.EXPECT().SetGaugeWithLabels(
[]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID, telemetry.TTL},
gomock.Any(),
[]telemetry.Label{
{Name: telemetry.SPIFFEID, Value: "spiffe://example.org/foo"},
})
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchX509SVID, telemetry.ElapsedTime}, gomock.Any(), labels)
err = s.h.sendX509SVIDResponse(s.workloadUpdate(), stream, s.h.Metrics, []*common.Selector{})
s.Assert().NoError(err)
}
func (s *HandlerTestSuite) TestComposeX509Response() {
update := s.workloadUpdate()
keyData, err := x509.MarshalPKCS8PrivateKey(update.Identities[0].PrivateKey)
s.Require().NoError(err)
svidMsg := &workload.X509SVID{
SpiffeId: "spiffe://example.org/foo",
X509Svid: update.Identities[0].SVID[0].Raw,
X509SvidKey: keyData,
Bundle: update.Bundle.RootCAs()[0].Raw,
FederatesWith: []string{"spiffe://otherdomain.test"},
}
apiMsg := &workload.X509SVIDResponse{
Svids: []*workload.X509SVID{svidMsg},
FederatedBundles: map[string][]byte{
"spiffe://otherdomain.test": update.Bundle.RootCAs()[0].Raw,
},
}
resp, err := s.h.composeX509SVIDResponse(s.workloadUpdate())
s.Assert().NoError(err)
s.Assert().Equal(apiMsg, resp)
}
func (s *HandlerTestSuite) TestFetchJWTSVID() {
audience := []string{"foo"}
// request missing audience
resp, err := s.h.FetchJWTSVID(context.Background(), &workload.JWTSVIDRequest{})
s.requireErrorContains(err, "audience must be specified")
s.Require().Nil(resp)
// missing security header
resp, err = s.h.FetchJWTSVID(context.Background(), &workload.JWTSVIDRequest{
Audience: audience,
})
s.requireErrorContains(err, "Security header missing from request")
s.Require().Nil(resp)
// missing peer info
resp, err = s.h.FetchJWTSVID(makeContext(0), &workload.JWTSVIDRequest{
Audience: audience,
})
s.requireErrorContains(err, "Unable to fetch watcher from context")
s.Require().Nil(resp)
// no identity issued
selectors := []*common.Selector{{Type: "foo", Value: "bar"}}
s.attestor.SetSelectors(1, selectors)
s.manager.EXPECT().MatchingIdentities(selectors).Return(nil)
statusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.PermissionDenied.String()}
attestorStatusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
setupMetricsCommonExpectations(s.metrics, len(selectors), attestorStatusLabel)
labels := []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.JWT},
statusLabel,
}
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.ElapsedTime}, gomock.Any(), labels)
resp, err = s.h.FetchJWTSVID(makeContext(1), &workload.JWTSVIDRequest{
Audience: audience,
})
s.requireErrorContains(err, "no identity issued")
s.Require().Nil(resp)
// fetch SVIDs for all SPIFFE IDs
identities := []cache.Identity{
{
Entry: &common.RegistrationEntry{
SpiffeId: "spiffe://example.org/one",
},
},
{
Entry: &common.RegistrationEntry{
SpiffeId: "spiffe://example.org/two",
},
},
}
s.attestor.SetSelectors(1, selectors)
s.manager.EXPECT().MatchingIdentities(selectors).Return(identities)
ONE := &client.JWTSVID{Token: "ONE"}
TWO := &client.JWTSVID{Token: "TWO"}
s.manager.EXPECT().FetchJWTSVID(gomock.Any(), "spiffe://example.org/one", audience).Return(ONE, nil)
s.manager.EXPECT().FetchJWTSVID(gomock.Any(), "spiffe://example.org/two", audience).Return(TWO, nil)
statusLabel = telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
setupMetricsCommonExpectations(s.metrics, len(selectors), statusLabel)
labels = []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.JWT},
statusLabel,
}
s.metrics.EXPECT().SetGaugeWithLabels(
[]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.TTL},
gomock.Any(),
[]telemetry.Label{
{
Name: telemetry.SPIFFEID, Value: "spiffe://example.org/one",
},
})
s.metrics.EXPECT().SetGaugeWithLabels(
[]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.TTL},
gomock.Any(),
[]telemetry.Label{
{
Name: telemetry.SPIFFEID, Value: "spiffe://example.org/two",
},
})
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.ElapsedTime}, gomock.Any(), labels)
resp, err = s.h.FetchJWTSVID(makeContext(1), &workload.JWTSVIDRequest{
Audience: audience,
})
s.Require().NoError(err)
s.Require().Equal(&workload.JWTSVIDResponse{
Svids: []*workload.JWTSVID{
{
SpiffeId: "spiffe://example.org/one",
Svid: "ONE",
},
{
SpiffeId: "spiffe://example.org/two",
Svid: "TWO",
},
},
}, resp)
// fetch SVIDs for specific SPIFFE ID
s.attestor.SetSelectors(1, selectors)
s.manager.EXPECT().MatchingIdentities(selectors).Return(identities)
s.manager.EXPECT().FetchJWTSVID(gomock.Any(), "spiffe://example.org/two", audience).Return(TWO, nil)
statusLabel = telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
setupMetricsCommonExpectations(s.metrics, len(selectors), statusLabel)
labels = []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.JWT},
statusLabel,
}
s.metrics.EXPECT().SetGaugeWithLabels(
[]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.TTL},
gomock.Any(),
[]telemetry.Label{
{
Name: telemetry.SPIFFEID, Value: "spiffe://example.org/two",
},
})
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID}, float32(1), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTSVID, telemetry.ElapsedTime}, gomock.Any(), labels)
resp, err = s.h.FetchJWTSVID(makeContext(1), &workload.JWTSVIDRequest{
SpiffeId: "spiffe://example.org/two",
Audience: audience,
})
s.Require().NoError(err)
s.Require().Equal(&workload.JWTSVIDResponse{
Svids: []*workload.JWTSVID{
{
SpiffeId: "spiffe://example.org/two",
Svid: "TWO",
},
},
}, resp)
}
func setupMetricsCommonExpectations(metrics *mock_telemetry.MockMetrics, selectorsCount int, statusLabel telemetry.Label) {
attestorLabels := []telemetry.Label{{telemetry.Attestor, "fake"}, statusLabel}
metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.WorkloadAttestorLatency}, float32(1), attestorLabels)
metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.WorkloadAttestorLatency, telemetry.ElapsedTime}, gomock.Any(), attestorLabels)
metrics.EXPECT().AddSample([]string{telemetry.WorkloadAPI, telemetry.DiscoveredSelectors}, float32(selectorsCount))
metrics.EXPECT().MeasureSince([]string{telemetry.WorkloadAPI, telemetry.WorkloadAttestationDuration}, gomock.Any())
metrics.EXPECT().IncrCounter([]string{telemetry.WorkloadAPI, telemetry.Connection}, float32(1))
metrics.EXPECT().SetGauge([]string{telemetry.WorkloadAPI, telemetry.Connections}, float32(1))
metrics.EXPECT().SetGauge([]string{telemetry.WorkloadAPI, telemetry.Connections}, float32(0))
}
func (s *HandlerTestSuite) TestFetchJWTBundles() {
stream := mock_workload.NewMockSpiffeWorkloadAPI_FetchJWTBundlesServer(s.ctrl)
// missing security header
stream.EXPECT().Context().Return(context.Background())
err := s.h.FetchJWTBundles(&workload.JWTBundlesRequest{}, stream)
s.requireErrorContains(err, "Security header missing from request")
// missing peer info
stream.EXPECT().Context().Return(makeContext(0))
err = s.h.FetchJWTBundles(&workload.JWTBundlesRequest{}, stream)
s.requireErrorContains(err, "Unable to fetch watcher from context")
// success
ctx, cancel := context.WithCancel(makeContext(1))
defer cancel()
selectors := []*common.Selector{{Type: "foo", Value: "bar"}}
subscriber := mock_cache.NewMockSubscriber(s.ctrl)
subscription := make(chan *cache.WorkloadUpdate)
subscriber.EXPECT().Updates().Return(subscription).AnyTimes()
subscriber.EXPECT().Finish()
result := make(chan error, 1)
stream.EXPECT().Context().Return(ctx).AnyTimes()
s.attestor.SetSelectors(1, selectors)
s.manager.EXPECT().SubscribeToCacheChanges(cache.Selectors{selectors[0]}).Return(subscriber)
stream.EXPECT().Send(&workload.JWTBundlesResponse{
Bundles: map[string][]byte{
"spiffe://example.org": []byte("{\n \"keys\": null\n}"),
"spiffe://otherdomain.test": []byte("{\n \"keys\": null\n}"),
},
})
statusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
setupMetricsCommonExpectations(s.metrics, len(selectors), statusLabel)
labels := []telemetry.Label{
{Name: telemetry.SVIDType, Value: telemetry.JWT},
statusLabel,
}
s.metrics.EXPECT().IncrCounter([]string{telemetry.WorkloadAPI, telemetry.FetchJWTBundles}, float32(1))
s.metrics.EXPECT().IncrCounter([]string{telemetry.WorkloadAPI, telemetry.BundlesUpdate, telemetry.JWT}, float32(1))
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTBundles}, gomock.Any(), labels)
s.metrics.EXPECT().MeasureSinceWithLabels([]string{telemetry.WorkloadAPI, telemetry.FetchJWTBundles, telemetry.ElapsedTime}, gomock.Any(), labels)
s.metrics.EXPECT().MeasureSince([]string{telemetry.WorkloadAPI, telemetry.SendJWTBundleLatency}, gomock.Any())
go func() { result <- s.h.FetchJWTBundles(&workload.JWTBundlesRequest{}, stream) }()
// Make sure it's still running...
select {
case err := <-result:
s.T().Errorf("hander exited immediately: %v", err)
case <-time.NewTimer(1 * time.Millisecond).C:
}
select {
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("timeout sending update to workload handler")
case subscription <- s.workloadUpdate():
}
cancel()
select {
case err := <-result:
s.Assert().NoError(err)
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("workload handler hung, shutdown timer exceeded")
}
}
func (s *HandlerTestSuite) TestComposeJWTBundlesResponse() {
pkixBytes, err := base64.StdEncoding.DecodeString("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYSlUVLqTD8DEnA4F1EWMTf5RXc5lnCxw+5WKJwngEL3rPc9i4Tgzz9riR3I/NiSlkgRO1WsxBusqpC284j9dXA==")
s.Require().NoError(err)
// no bundles in update
resp, err := s.h.composeJWTBundlesResponse(&cache.WorkloadUpdate{})
s.Require().NoError(err)
s.Require().Empty(resp.Bundles)
// bundles in update
hasKeysBundle, err := bundleutil.BundleFromProto(&common.Bundle{
TrustDomainId: "spiffe://has-keys.test",
JwtSigningKeys: []*common.PublicKey{
{
Kid: "kid",
PkixBytes: pkixBytes,
},
},
})
s.Require().NoError(err)
noKeysBundle, err := bundleutil.BundleFromProto(&common.Bundle{
TrustDomainId: "spiffe://no-keys.test",
})
s.Require().NoError(err)
resp, err = s.h.composeJWTBundlesResponse(&cache.WorkloadUpdate{
Bundle: hasKeysBundle,
FederatedBundles: map[string]*bundleutil.Bundle{
"spiffe://no-keys.test": noKeysBundle,
},
})
s.Require().NoError(err)
s.Require().Len(resp.Bundles, 2)
s.JSONEq(`{
"keys": [
{
"kid":"kid",
"use":"jwt-svid",
"kty":"EC",
"crv":"P-256",
"x":"YSlUVLqTD8DEnA4F1EWMTf5RXc5lnCxw-5WKJwngEL0",
"y":"6z3PYuE4M8_a4kdyPzYkpZIETtVrMQbrKqQtvOI_XVw"
}
]
}`, string(resp.Bundles["spiffe://has-keys.test"]))
s.JSONEq(`{
"keys": null
}`, string(resp.Bundles["spiffe://no-keys.test"]))
}
func (s *HandlerTestSuite) TestValidateJWTSVID() {
selectors := []*common.Selector{{Type: "foo", Value: "bar"}}
s.attestor.SetSelectors(1, selectors)
attestorStatusLabel := telemetry.Label{Name: telemetry.Status, Value: codes.OK.String()}
// build up bundle that has the JWT signing public key
pkixBytes, err := x509.MarshalPKIXPublicKey(jwtSigningKey.Public())
s.Require().NoError(err)
bundle, err := bundleutil.BundleFromProto(&common.Bundle{
TrustDomainId: "spiffe://example.org",
JwtSigningKeys: []*common.PublicKey{
{
Kid: "kid",
PkixBytes: pkixBytes,
},
},
})
s.Require().NoError(err)
// Sign a token with an issuer
jwtSigner := jwtsvid.NewSigner(jwtsvid.SignerConfig{
Issuer: "issuer",
})
svid, err := jwtSigner.SignToken(
"spiffe://example.org/blog",
[]string{"audience"},
time.Now().Add(time.Minute),
jwtSigningKey,
"kid",
)
s.Require().NoError(err)
// Sign a token without an issuer
jwtSignerNoIssuer := jwtsvid.NewSigner(jwtsvid.SignerConfig{})
svidNoIssuer, err := jwtSignerNoIssuer.SignToken(
"spiffe://example.org/blog",
[]string{"audience"},
time.Now().Add(time.Minute),
jwtSigningKey,
"kid",
)
s.Require().NoError(err)
testCases := []struct {
name string
ctx context.Context
req *workload.ValidateJWTSVIDRequest
workloadUpdate *cache.WorkloadUpdate
code codes.Code
msg string
labels []telemetry.Label
issuer string
}{
{
name: "no audience",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Svid: "svid",
},
code: codes.InvalidArgument,
msg: "audience must be specified",
},
{
name: "no svid",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
},
code: codes.InvalidArgument,
msg: "svid must be specified",
},
{
name: "missing security header",
ctx: context.Background(),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: "svid",
},
code: codes.InvalidArgument,
msg: "Security header missing from request",
},
{
name: "missing peer info",
ctx: makeContext(0),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: "svid",
},
code: codes.Internal,
msg: "Is this a supported system? Please report this bug: Unable to fetch watcher from context",
},
{
name: "malformed token",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: "svid",
},
workloadUpdate: &cache.WorkloadUpdate{},
code: codes.InvalidArgument,
msg: "token contains an invalid number of segments",
},
{
name: "validated by our trust domain bundle",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: svid,
},
workloadUpdate: &cache.WorkloadUpdate{
Bundle: bundle,
},
code: codes.OK,
labels: []telemetry.Label{
{Name: telemetry.Subject, Value: "spiffe://example.org/blog"},
{Name: telemetry.Audience, Value: "audience"},
},
issuer: "issuer",
},
{
name: "validated by our trust domain bundle",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: svid,
},
workloadUpdate: &cache.WorkloadUpdate{
FederatedBundles: map[string]*bundleutil.Bundle{
"spiffe://example.org": bundle,
},
},
code: codes.OK,
labels: []telemetry.Label{
{Name: telemetry.Subject, Value: "spiffe://example.org/blog"},
{Name: telemetry.Audience, Value: "audience"},
},
issuer: "issuer",
},
{
name: "validate token without an issuer",
ctx: makeContext(1),
req: &workload.ValidateJWTSVIDRequest{
Audience: "audience",
Svid: svidNoIssuer,
},
workloadUpdate: &cache.WorkloadUpdate{
Bundle: bundle,
},
code: codes.OK,
labels: []telemetry.Label{
{Name: telemetry.Subject, Value: "spiffe://example.org/blog"},
{Name: telemetry.Audience, Value: "audience"},
},
},
}
for _, testCase := range testCases {
s.T().Run(testCase.name, func(t *testing.T) {
if testCase.workloadUpdate != nil {
// Setup a bunch of expectations around metrics if the test
// is expecting to successfully attest (i.e. return a
// workload update)
s.manager.EXPECT().FetchWorkloadUpdate(selectors).Return(testCase.workloadUpdate)
setupMetricsCommonExpectations(s.metrics, len(selectors), attestorStatusLabel)
if len(testCase.labels) > 0 {
s.metrics.EXPECT().IncrCounterWithLabels([]string{telemetry.WorkloadAPI, telemetry.ValidateJWTSVID}, float32(1), testCase.labels)
} else {
s.metrics.EXPECT().IncrCounter([]string{telemetry.WorkloadAPI, telemetry.ValidateJWTSVID}, float32(1))
}
}
resp, err := s.h.ValidateJWTSVID(testCase.ctx, testCase.req)
if testCase.code != codes.OK {
spiretest.RequireGRPCStatus(t, err, testCase.code, testCase.msg)
require.Nil(t, resp)
return
}
spiretest.RequireGRPCStatus(t, err, testCase.code, "")
require.NotNil(t, resp)
require.Equal(t, "spiffe://example.org/blog", resp.SpiffeId)
require.NotNil(t, resp.Claims)
expectedNumFields := 4
if testCase.issuer != "" {
expectedNumFields++
}
assert.Len(t, resp.Claims.Fields, expectedNumFields)
// verify audience
spiretest.AssertProtoEqual(t,
&structpb.Value{
Kind: &structpb.Value_StringValue{
StringValue: "audience",
},
}, resp.Claims.Fields["aud"])
// verify expiration is set
assert.NotEmpty(t, resp.Claims.Fields["exp"])
// verify issued at is set
assert.NotEmpty(t, resp.Claims.Fields["iat"])
// verify issuer
if testCase.issuer != "" {
spiretest.AssertProtoEqual(t,
&structpb.Value{
Kind: &structpb.Value_StringValue{
StringValue: testCase.issuer,
},
}, resp.Claims.Fields["iss"])
} else {
assert.Nil(t, resp.Claims.Fields["iss"])
}
// verify subject
spiretest.AssertProtoEqual(t,
&structpb.Value{
Kind: &structpb.Value_StringValue{
StringValue: "spiffe://example.org/blog",
},
}, resp.Claims.Fields["sub"])
})
}
}
func (s *HandlerTestSuite) TestStructFromValues() {
expected := &structpb.Struct{
Fields: map[string]*structpb.Value{
"foo": {
Kind: &structpb.Value_StringValue{
StringValue: "bar",
},
},
"baz": {
Kind: &structpb.Value_NumberValue{
NumberValue: 3.0,
},
},
},
}
actual, err := structFromValues(map[string]interface{}{
"foo": "bar",
"baz": 3,
})
s.Require().NoError(err)
s.Require().Equal(expected, actual)
}
func (s *HandlerTestSuite) TestPeerWatcher() {
p := &peer.Peer{
AuthInfo: peertracker.AuthInfo{
Watcher: FakeWatcher{},
},
}
ctx := peer.NewContext(context.Background(), p)
watcher, err := s.h.peerWatcher(ctx)
s.Assert().NoError(err)
s.Assert().Equal(int32(1), watcher.PID())
// Implementation error - custom auth creds not in use
p.AuthInfo = nil
ctx = peer.NewContext(context.Background(), p)
_, err = s.h.peerWatcher(ctx)
s.Assert().Error(err)
}
func (s *HandlerTestSuite) workloadUpdate() *cache.WorkloadUpdate {
svid, key, err := util.LoadSVIDFixture()
s.Require().NoError(err)
ca, _, err := util.LoadCAFixture()
s.Require().NoError(err)
identity := cache.Identity{
SVID: []*x509.Certificate{svid},
PrivateKey: key,
Entry: &common.RegistrationEntry{
SpiffeId: "spiffe://example.org/foo",
FederatesWith: []string{"spiffe://otherdomain.test"},
},
}
update := &cache.WorkloadUpdate{
Identities: []cache.Identity{identity},
Bundle: bundleutil.BundleFromRootCA("spiffe://example.org", ca),
FederatedBundles: map[string]*bundleutil.Bundle{
"spiffe://otherdomain.test": bundleutil.BundleFromRootCA("spiffe://otherdomain.test", ca),
},
}
return update
}
func (s *HandlerTestSuite) requireErrorContains(err error, contains string) {
s.Require().Error(err)
s.Require().Contains(err.Error(), contains)
}
func makeContext(pid int) context.Context {
header := metadata.Pairs("workload.spiffe.io", "true")
ctx := context.Background()
ctx = metadata.NewIncomingContext(ctx, header)
if pid > 0 {
ctx = peer.NewContext(ctx, &peer.Peer{
AuthInfo: peertracker.AuthInfo{
Watcher: FakeWatcher{},
},
})
}
return ctx
}
type FakeWatcher struct{}
func (w FakeWatcher) Close() {}
func (w FakeWatcher) IsAlive() error { return nil }
func (w FakeWatcher) PID() int32 { return 1 }
| 1 | 12,020 | Hmm leaking the library name out in the Workload API doesn't seem great. Do we want to catch these errors and return a canned "could not parse token" or something instead? | spiffe-spire | go |
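A sketch of the canned-message idea floated above; the helper and its placement are illustrative, not the handler's real structure. The point is to keep the go-jose detail server-side and hand workloads a stable, library-agnostic status:

package main

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"gopkg.in/square/go-jose.v2/jwt"
)

// parseSVID wraps the library error so Workload API callers never see
// go-jose internals in the returned status message.
func parseSVID(raw string) (*jwt.JSONWebToken, error) {
	tok, err := jwt.ParseSigned(raw)
	if err != nil {
		// The original err can still be logged server-side for debugging.
		return nil, status.Error(codes.InvalidArgument, "could not parse token")
	}
	return tok, nil
}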
@@ -9,12 +9,14 @@ module Travis
}
def install
- self.if '-f build.gradle', 'gradle assemble', fold: 'install', retry: true
+ self.if '-f ./gradlew', './gradlew assemble', fold: 'install', retry: true
+ self.elif '-f build.gradle', 'gradle assemble', fold: 'install', retry: true
        self.elif '-f pom.xml', 'mvn install -DskipTests=true -B', fold: 'install', retry: true # Otherwise mvn install will run tests, which we skip here. Suggestion from Charles Nutter. MK.
end
def script
- self.if '-f build.gradle', 'gradle check'
+ self.if '-f ./gradlew', './gradlew check'
+ self.elif '-f build.gradle', 'gradle check'
self.elif '-f pom.xml', 'mvn test -B'
self.else 'ant test'
end | 1 | module Travis
module Build
class Script
class Jvm < Script
include Jdk
DEFAULTS = {
jdk: 'default'
}
def install
self.if '-f build.gradle', 'gradle assemble', fold: 'install', retry: true
          self.elif '-f pom.xml', 'mvn install -DskipTests=true -B', fold: 'install', retry: true # Otherwise mvn install will run tests, which we skip here. Suggestion from Charles Nutter. MK.
end
def script
self.if '-f build.gradle', 'gradle check'
self.elif '-f pom.xml', 'mvn test -B'
self.else 'ant test'
end
end
end
end
end
| 1 | 10,761 | I think `-f gradlew` is better for consistency with the other file checks | travis-ci-travis-build | rb
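Spelled out, the suggestion reads as follows (a sketch, not the merged diff): test for the wrapper as a bare relative path so it matches the build.gradle and pom.xml checks, while the command keeps the ./ prefix it needs to execute the local script.

      def install
        self.if '-f gradlew', './gradlew assemble', fold: 'install', retry: true
        self.elif '-f build.gradle', 'gradle assemble', fold: 'install', retry: true
        self.elif '-f pom.xml', 'mvn install -DskipTests=true -B', fold: 'install', retry: true
      end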
@@ -40,4 +40,10 @@ public class FlinkConfigOptions {
.intType()
.defaultValue(100)
.withDescription("Sets max infer parallelism for source operator.");
+
+ public static final ConfigOption<Integer> SOURCE_READER_FETCH_RECORD_BATCH_SIZE = ConfigOptions
+ .key("source.iceberg.reader.fetch-record-batch-size")
+ .intType()
+ .defaultValue(2048)
+ .withDescription("The target record batch size for split reader fetch.");
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
public class FlinkConfigOptions {
private FlinkConfigOptions() {
}
public static final ConfigOption<Boolean> TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM =
ConfigOptions.key("table.exec.iceberg.infer-source-parallelism")
.booleanType()
.defaultValue(true)
.withDescription("If is false, parallelism of source are set by config.\n" +
"If is true, source parallelism is inferred according to splits number.\n");
public static final ConfigOption<Integer> TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM_MAX =
ConfigOptions.key("table.exec.iceberg.infer-source-parallelism.max")
.intType()
.defaultValue(100)
.withDescription("Sets max infer parallelism for source operator.");
}
| 1 | 34,691 | Is there precedent for this config key? What other keys are similar? The others in this file start with `table.exec.iceberg`. Is there a reason for not continuing with that convention? | apache-iceberg | java |
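One way to answer the convention question, with an illustrative key name rather than whatever was eventually merged, is to keep the new option under the same table.exec.iceberg prefix the rest of the file uses:

  public static final ConfigOption<Integer> TABLE_EXEC_ICEBERG_FETCH_BATCH_RECORD_COUNT =
      ConfigOptions.key("table.exec.iceberg.fetch-batch-record-count")
          .intType()
          .defaultValue(2048)
          .withDescription("The target record batch size for split reader fetch.");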
@@ -1,4 +1,4 @@
-using System;
+using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq.Expressions; | 1 | using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq.Expressions;
using System.Reflection;
using System.Text;
using MvvmCross.Platform.Logging;
namespace MvvmCross.Core.Platform.LogProviders
{
internal sealed class ConsoleLogProvider : MvxBaseLogProvider
{
private static readonly Type ConsoleType;
private static readonly Type ConsoleColorType;
private static readonly Action<string> ConsoleWriteLine;
private static readonly Func<int> GetConsoleForeground;
private static readonly Action<int> SetConsoleForeground;
private static readonly IDictionary<MvxLogLevel, int> Colors;
static ConsoleLogProvider()
{
ConsoleType = Type.GetType("System.Console");
ConsoleColorType = ConsoleColorValues.Type;
if (!IsLoggerAvailable())
{
throw new InvalidOperationException("System.Console or System.ConsoleColor type not found");
}
MessageFormatter = DefaultMessageFormatter;
Colors = new Dictionary<MvxLogLevel, int>
{
{MvxLogLevel.Fatal, ConsoleColorValues.Red},
{MvxLogLevel.Error, ConsoleColorValues.Yellow},
{MvxLogLevel.Warn, ConsoleColorValues.Magenta},
{MvxLogLevel.Info, ConsoleColorValues.White},
{MvxLogLevel.Debug, ConsoleColorValues.Gray},
{MvxLogLevel.Trace, ConsoleColorValues.DarkGray},
};
ConsoleWriteLine = GetConsoleWrite();
GetConsoleForeground = GetGetConsoleForeground();
SetConsoleForeground = GetSetConsoleForeground();
}
internal static bool IsLoggerAvailable()
=> ConsoleType != null && ConsoleColorType != null;
protected override Logger GetLogger(string name)
=> new ColouredConsoleLogger(name, ConsoleWriteLine, GetConsoleForeground, SetConsoleForeground).Log;
internal delegate string MessageFormatterDelegate(
string loggerName,
MvxLogLevel level,
object message,
Exception e);
internal static MessageFormatterDelegate MessageFormatter { get; set; }
private static string DefaultMessageFormatter(string loggerName, MvxLogLevel level, object message, Exception e)
{
var stringBuilder = new StringBuilder();
stringBuilder.Append(DateTime.Now.ToString("yyyy-MM-dd hh:mm:ss", CultureInfo.InvariantCulture));
stringBuilder.Append(" ");
// Append a readable representation of the log level
stringBuilder.Append(("[" + level.ToString().ToUpper() + "]").PadRight(8));
stringBuilder.Append("(" + loggerName + ") ");
// Append the message
stringBuilder.Append(message);
// Append stack trace if there is an exception
if (e != null)
{
stringBuilder.Append(Environment.NewLine).Append(e.GetType());
stringBuilder.Append(Environment.NewLine).Append(e.Message);
stringBuilder.Append(Environment.NewLine).Append(e.StackTrace);
}
return stringBuilder.ToString();
}
private static Action<string> GetConsoleWrite()
{
var messageParameter = Expression.Parameter(typeof(string), "message");
MethodInfo writeMethod = ConsoleType.GetMethodPortable("WriteLine", typeof(string));
var writeExpression = Expression.Call(writeMethod, messageParameter);
return Expression.Lambda<Action<string>>(
writeExpression, messageParameter).Compile();
}
private static Func<int> GetGetConsoleForeground()
{
MethodInfo getForeground = ConsoleType.GetPropertyPortable("ForegroundColor").GetGetMethod();
var getForegroundExpression = Expression.Convert(Expression.Call(getForeground), typeof(int));
return Expression.Lambda<Func<int>>(getForegroundExpression).Compile();
}
private static Action<int> GetSetConsoleForeground()
{
var colorParameter = Expression.Parameter(typeof(int), "color");
MethodInfo setForeground = ConsoleType.GetPropertyPortable("ForegroundColor").GetSetMethod();
var setForegroundExpression = Expression.Call(setForeground,
Expression.Convert(colorParameter, ConsoleColorType));
return Expression.Lambda<Action<int>>(
setForegroundExpression, colorParameter).Compile();
}
public class ColouredConsoleLogger
{
private readonly string _name;
private readonly Action<string> _write;
private readonly Func<int> _getForeground;
private readonly Action<int> _setForeground;
public ColouredConsoleLogger(string name, Action<string> write,
Func<int> getForeground, Action<int> setForeground)
{
_name = name;
_write = write;
_getForeground = getForeground;
_setForeground = setForeground;
}
public bool Log(MvxLogLevel logLevel, Func<string> messageFunc, Exception exception,
params object[] formatParameters)
{
if (messageFunc == null)
{
return true;
}
messageFunc = LogMessageFormatter.SimulateStructuredLogging(messageFunc, formatParameters);
Write(logLevel, messageFunc(), exception);
return true;
}
protected void Write(MvxLogLevel logLevel, string message, Exception e = null)
{
var formattedMessage = MessageFormatter(_name, logLevel, message, e);
int color;
if (Colors.TryGetValue(logLevel, out color))
{
var originalColor = _getForeground();
try
{
_setForeground(color);
_write(formattedMessage);
}
finally
{
_setForeground(originalColor);
}
}
else
{
_write(formattedMessage);
}
}
}
private static class ConsoleColorValues
{
internal static readonly Type Type;
internal static readonly int Red;
internal static readonly int Yellow;
internal static readonly int Magenta;
internal static readonly int White;
internal static readonly int Gray;
internal static readonly int DarkGray;
static ConsoleColorValues()
{
Type = Type.GetType("System.ConsoleColor");
if (Type == null) return;
Red = (int)Enum.Parse(Type, "Red", false);
Yellow = (int)Enum.Parse(Type, "Yellow", false);
Magenta = (int)Enum.Parse(Type, "Magenta", false);
White = (int)Enum.Parse(Type, "White", false);
Gray = (int)Enum.Parse(Type, "Gray", false);
DarkGray = (int)Enum.Parse(Type, "DarkGray", false);
}
}
}
}
| 1 | 13,338 | Can we avoid no-change changes being committed - they make it harder to distinguish actual changes from code editor changes | MvvmCross-MvvmCross | .cs |
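Since the no-change diff in this record is an editor flipping the UTF-8 byte-order mark, one common guard is to pin the charset in .editorconfig so editors stop rewriting it. A sketch, assuming the team standardizes on BOM-less UTF-8:

# .editorconfig (illustrative)
[*.cs]
charset = utf-8   # use utf-8-bom instead if the codebase standardizes on a BOM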
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[UpdateServerCertificate.java demonstrates how to update the name of an AWS Identity and Access Management (IAM) server certificate.]
+//snippet-sourcedescription:[UpdateServerCertificate.java demonstrates how to update the name of an AWS Identity and Access Management (AWS IAM) server certificate.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[UpdateServerCertificate.java demonstrates how to update the name of an AWS Identity and Access Management (IAM) server certificate.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.update_server_certificate.complete]
// snippet-start:[iam.java2.update_server_certificate.import]
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.IamException;
import software.amazon.awssdk.services.iam.model.UpdateServerCertificateRequest;
import software.amazon.awssdk.services.iam.model.UpdateServerCertificateResponse;
// snippet-end:[iam.java2.update_server_certificate.import]
public class UpdateServerCertificate {
public static void main(String[] args) {
final String USAGE = "\n" +
"Usage:\n" +
" UpdateServerCertificate <curName> <newName> \n\n" +
"Where:\n" +
" curName - the current certificate name. \n\n" +
" newName - an updated certificate name. \n\n" ;
if (args.length != 2) {
System.out.println(USAGE);
System.exit(1);
}
// Read the command line arguments
String curName = args[0];
String newName = args[1];
Region region = Region.AWS_GLOBAL;
IamClient iam = IamClient.builder()
.region(region)
.build();
updateCertificate(iam, curName, newName) ;
System.out.println("Done");
iam.close();
}
// snippet-start:[iam.java2.update_server_certificate.main]
public static void updateCertificate(IamClient iam, String curName, String newName) {
try {
UpdateServerCertificateRequest request =
UpdateServerCertificateRequest.builder()
.serverCertificateName(curName)
.newServerCertificateName(newName)
.build();
UpdateServerCertificateResponse response =
iam.updateServerCertificate(request);
System.out.printf("Successfully updated server certificate to name %s",
newName);
} catch (IamException e) {
System.err.println(e.awsErrorDetails().errorMessage());
System.exit(1);
}
}
// snippet-end:[iam.java2.update_server_certificate.main]
}
// snippet-end:[iam.java2.update_server_certificate.complete]
| 1 | 18,253 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
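Reading the comment as a request to keep the standard abbreviation form, the metadata line would simply stay as it read before the change:

//snippet-sourcedescription:[UpdateServerCertificate.java demonstrates how to update the name of an AWS Identity and Access Management (IAM) server certificate.]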
@@ -83,14 +83,10 @@ func NewCommand(licenseCommandName string) *cli.Command {
cmd.RegisterSignalCallback(func() { errorChannel <- nil })
cmdService := &serviceCommand{
- tequilapi: client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort),
- errorChannel: errorChannel,
- identityHandler: identity_selector.NewHandler(
- di.IdentityManager,
- di.MysteriumAPI,
- identity.NewIdentityCache(nodeOptions.Directories.Keystore, "remember.json"),
- di.SignerFactory),
- ap: parseAccessPolicyFlag(ctx),
+ tequilapi: client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort),
+ errorChannel: errorChannel,
+ identityHandler: di.IdentitySelector,
+ ap: parseAccessPolicyFlag(ctx),
}
go func() { | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package service
import (
"fmt"
"os"
"strings"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/cmd/commands/license"
"github.com/mysteriumnetwork/node/core/service"
"github.com/mysteriumnetwork/node/identity"
identity_selector "github.com/mysteriumnetwork/node/identity/selector"
"github.com/mysteriumnetwork/node/metadata"
openvpn_service "github.com/mysteriumnetwork/node/services/openvpn/service"
wireguard_service "github.com/mysteriumnetwork/node/services/wireguard/service"
"github.com/mysteriumnetwork/node/tequilapi/client"
"github.com/urfave/cli"
)
const serviceCommandName = "service"
var (
identityFlag = cli.StringFlag{
Name: "identity",
Usage: "Keystore's identity used to provide service. If not given identity will be created automatically",
Value: "",
}
identityPassphraseFlag = cli.StringFlag{
Name: "identity.passphrase",
Usage: "Used to unlock keystore's identity",
Value: "",
}
agreedTermsConditionsFlag = cli.BoolFlag{
Name: "agreed-terms-and-conditions",
Usage: "Agree with terms & conditions",
}
accessPolicyListFlag = cli.StringFlag{
Name: "access-policy.list",
Usage: "Comma separated list that determines the allowed identities on our service.",
Value: "",
}
)
// NewCommand function creates service command
func NewCommand(licenseCommandName string) *cli.Command {
var di cmd.Dependencies
command := &cli.Command{
Name: serviceCommandName,
Usage: "Starts and publishes services on Mysterium Network",
ArgsUsage: "comma separated list of services to start",
Action: func(ctx *cli.Context) error {
if !ctx.Bool(agreedTermsConditionsFlag.Name) {
printTermWarning(licenseCommandName)
os.Exit(2)
}
errorChannel := make(chan error)
nodeOptions := cmd.ParseFlagsNode(ctx)
if err := di.Bootstrap(nodeOptions); err != nil {
return err
}
go func() { errorChannel <- di.Node.Wait() }()
cmd.RegisterSignalCallback(func() { errorChannel <- nil })
cmdService := &serviceCommand{
tequilapi: client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort),
errorChannel: errorChannel,
identityHandler: identity_selector.NewHandler(
di.IdentityManager,
di.MysteriumAPI,
identity.NewIdentityCache(nodeOptions.Directories.Keystore, "remember.json"),
di.SignerFactory),
ap: parseAccessPolicyFlag(ctx),
}
go func() {
errorChannel <- cmdService.Run(ctx)
}()
return <-errorChannel
},
After: func(ctx *cli.Context) error {
return di.Shutdown()
},
}
registerFlags(&command.Flags)
return command
}
// serviceCommand represent entrypoint for service command with top level components
type serviceCommand struct {
identityHandler identity_selector.Handler
tequilapi *client.Client
errorChannel chan error
ap client.AccessPoliciesRequest
}
// Run runs a command
func (sc *serviceCommand) Run(ctx *cli.Context) (err error) {
arg := ctx.Args().Get(0)
if arg != "" {
serviceTypes = strings.Split(arg, ",")
}
identity, err := sc.unlockIdentity(parseIdentityFlags(ctx))
if err != nil {
return err
}
if err := sc.runServices(ctx, identity.Address, serviceTypes); err != nil {
return err
}
return <-sc.errorChannel
}
func (sc *serviceCommand) unlockIdentity(identityOptions service.OptionsIdentity) (identity.Identity, error) {
loadIdentity := identity_selector.NewLoader(sc.identityHandler, identityOptions.Identity, identityOptions.Passphrase)
return loadIdentity()
}
func (sc *serviceCommand) runServices(ctx *cli.Context, providerID string, serviceTypes []string) error {
for _, serviceType := range serviceTypes {
options, err := parseFlagsByServiceType(ctx, serviceType)
if err != nil {
return err
}
go sc.runService(providerID, serviceType, options)
}
return nil
}
func (sc *serviceCommand) runService(providerID, serviceType string, options service.Options) {
_, err := sc.tequilapi.ServiceStart(providerID, serviceType, options, sc.ap)
if err != nil {
sc.errorChannel <- err
}
}
// registerFlags function register service flags to flag list
func registerFlags(flags *[]cli.Flag) {
*flags = append(*flags,
agreedTermsConditionsFlag,
identityFlag, identityPassphraseFlag,
accessPolicyListFlag,
)
openvpn_service.RegisterFlags(flags)
wireguard_service.RegisterFlags(flags)
}
// parseIdentityFlags function fills in service command options from CLI context
func parseIdentityFlags(ctx *cli.Context) service.OptionsIdentity {
return service.OptionsIdentity{
Identity: ctx.String(identityFlag.Name),
Passphrase: ctx.String(identityPassphraseFlag.Name),
}
}
// parseAccessPolicyFlag fetches the access policy data from CLI context
func parseAccessPolicyFlag(ctx *cli.Context) client.AccessPoliciesRequest {
policies := ctx.String(accessPolicyListFlag.Name)
if policies == "" {
return client.AccessPoliciesRequest{}
}
splits := strings.Split(policies, ",")
return client.AccessPoliciesRequest{
IDs: splits,
}
}
func parseFlagsByServiceType(ctx *cli.Context, serviceType string) (service.Options, error) {
if f, ok := serviceTypesFlagsParser[serviceType]; ok {
return f(ctx), nil
}
return service.OptionsIdentity{}, fmt.Errorf("unknown service type: %q", serviceType)
}
func printTermWarning(licenseCommandName string) {
fmt.Println(metadata.VersionAsSummary(metadata.LicenseCopyright(
"run program with 'myst "+licenseCommandName+" --"+license.LicenseWarrantyFlag.Name+"' option",
"run program with 'myst "+licenseCommandName+" --"+license.LicenseConditionsFlag.Name+"' option",
)))
fmt.Println()
fmt.Println("If you agree with these Terms & Conditions, run program again with '--agreed-terms-and-conditions' flag")
}
| 1 | 14,436 | You don't use this dependency anymore | mysteriumnetwork-node | go
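Which import went stale is not visible in this excerpt, so only the mechanical shape of the fix can be sketched: once identity_selector.NewHandler(...) is replaced by di.IdentitySelector, any binding that existed solely to feed that constructor can be deleted, and goimports will prune it automatically.

# Illustrative workflow; the file path is assumed, not taken from the PR.
goimports -w cmd/commands/service/command.go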
@@ -86,6 +86,8 @@ public abstract class AbstractSmartStoreTest extends SmartStoreTestCase {
assertTrue("ENABLE_FTS4 flag not found in compile options", compileOptions.contains("ENABLE_FTS4"));
assertTrue("ENABLE_FTS3_PARENTHESIS flag not found in compile options", compileOptions.contains("ENABLE_FTS3_PARENTHESIS"));
+ assertTrue("ENABLE_FTS5 flag not found in compile options", compileOptions.contains("ENABLE_FTS5"));
+ assertTrue("ENABLE_JSON1 flag not found in compile options", compileOptions.contains("ENABLE_JSON1"));
}
/** | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.store;
import android.database.Cursor;
import android.os.SystemClock;
import com.salesforce.androidsdk.smartstore.store.DBHelper;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.Order;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
import com.salesforce.androidsdk.util.test.JSONTestHelper;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
/**
* Abstract super class for plain and encrypted smart store tests
*
*/
public abstract class AbstractSmartStoreTest extends SmartStoreTestCase {
private static final String TEST_SOUP = "test_soup";
private static final String OTHER_TEST_SOUP = "other_test_soup";
private static final String THIRD_TEST_SOUP = "third_test_soup";
private static final String FOURTH_TEST_SOUP = "fourth_test_soup";
@Override
public void setUp() throws Exception {
super.setUp();
assertFalse("Table for test_soup should not exist", hasTable("TABLE_1"));
assertFalse("Soup test_soup should not exist", store.hasSoup(TEST_SOUP));
store.registerSoup(TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string)});
assertEquals("Table for test_soup was expected to be called TABLE_1", "TABLE_1", getSoupTableName(TEST_SOUP));
assertTrue("Table for test_soup should now exist", hasTable("TABLE_1"));
assertTrue("Soup test_soup should now exist", store.hasSoup(TEST_SOUP));
}
/**
* Checking compile options
*/
public void testCompileOptions() {
ArrayList<String> compileOptions = new ArrayList<String>();
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
c = db.rawQuery("PRAGMA compile_options", null);
            while (c.moveToNext()) { // the cursor starts before the first row, so this visits every row
compileOptions.add(c.getString(0));
}
}
finally {
safeClose(c);
}
assertTrue("ENABLE_FTS4 flag not found in compile options", compileOptions.contains("ENABLE_FTS4"));
assertTrue("ENABLE_FTS3_PARENTHESIS flag not found in compile options", compileOptions.contains("ENABLE_FTS3_PARENTHESIS"));
}
/**
* Testing method with paths to top level string/integer/array/map as well as edge cases (null object/null or empty path)
* @throws JSONException
*/
public void testProjectTopLevel() throws JSONException {
JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}");
// Null object
assertNull("Should have been null", SmartStore.project(null, "path"));
// Root
JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, null));
JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, ""));
// Top-level elements
assertEquals("Wrong value for key a", "va", SmartStore.project(json, "a"));
assertEquals("Wrong value for key b", 2, SmartStore.project(json, "b"));
JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[0,1,2]"), SmartStore.project(json, "c"));
JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONObject("{'d1':'vd1','d2':'vd2','d3':[1,2],'d4':{'e':5}}"), (JSONObject) SmartStore.project(json, "d"));
}
/**
* Testing method with paths to non-top level string/integer/array/map
* @throws JSONException
*/
public void testProjectNested() throws JSONException {
JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}");
// Nested elements
assertEquals("Wrong value for key d.d1", "vd1", SmartStore.project(json, "d.d1"));
assertEquals("Wrong value for key d.d2", "vd2", SmartStore.project(json, "d.d2"));
JSONTestHelper.assertSameJSON("Wrong value for key d.d3", new JSONArray("[1,2]"), SmartStore.project(json, "d.d3"));
JSONTestHelper.assertSameJSON("Wrong value for key d.d4", new JSONObject("{'e':5}"), SmartStore.project(json, "d.d4"));
assertEquals("Wrong value for key d.d4.e", 5, SmartStore.project(json, "d.d4.e"));
}
/**
* Testing method with path through arrays
* @throws JSONException
*/
public void testProjectThroughArrays() throws JSONException {
JSONObject json = new JSONObject("{\"a\":\"a1\", \"b\":2, \"c\":[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}], \"d\":[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]}");
JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}]"), SmartStore.project(json, "c"));
JSONTestHelper.assertSameJSON("Wrong value for key c.cc", new JSONArray("[\"cc1\",2, [1,2,3], {\"cc5\":5}]"), SmartStore.project(json, "c.cc"));
JSONTestHelper.assertSameJSON("Wrong value for key c.cc.cc5", new JSONArray("[5]"), SmartStore.project(json, "c.cc.cc5"));
JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONArray("[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]"), SmartStore.project(json, "d"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd", new JSONArray("[[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}], [{\"ddd\":\"ddd21\"}], [{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]]"), SmartStore.project(json, "d.dd"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd", new JSONArray("[[\"ddd11\",\"ddd12\"],[\"ddd21\"],[\"ddd31\"]]"), SmartStore.project(json, "d.dd.ddd"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd3", new JSONArray("[[\"ddd32\"]]"), SmartStore.project(json, "d.dd.ddd3"));
}
/**
* Check that the meta data table (soup index map) has been created
*/
public void testMetaDataTableCreated() {
assertTrue("Table soup_index_map not found", hasTable("soup_index_map"));
}
/**
* Test register/drop soup
*/
public void testRegisterDropSoup() {
// Before
assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP));
assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
store.registerSoup(THIRD_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string), new IndexSpec("value", Type.string)});
String soupTableName = getSoupTableName(THIRD_TEST_SOUP);
assertEquals("getSoupTableName should have returned TABLE_2", "TABLE_2", soupTableName);
assertTrue("Table for soup third_test_soup does exist", hasTable(soupTableName));
assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
// Drop
store.dropSoup(THIRD_TEST_SOUP);
// After
assertFalse("Soup third_test_soup should no longer exist", store.hasSoup(THIRD_TEST_SOUP));
assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP));
assertFalse("Table for soup third_test_soup does exist", hasTable(soupTableName));
}
/**
* Testing getAllSoupNames: register a new soup and then drop it and call getAllSoupNames before and after
*/
public void testGetAllSoupNames() {
// Before
assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
// Register another soup
store.registerSoup(THIRD_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string), new IndexSpec("value", Type.string)});
assertEquals("Two soup names expected", 2, store.getAllSoupNames().size());
assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
assertTrue(THIRD_TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(THIRD_TEST_SOUP));
// Drop the latest soup
store.dropSoup(THIRD_TEST_SOUP);
assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
}
/**
* Testing dropAllSoups: register a couple of soups then drop them all
*/
public void testDropAllSoups() {
// Register another soup
assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
store.registerSoup(THIRD_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string), new IndexSpec("value", Type.string)});
assertEquals("Two soup names expected", 2, store.getAllSoupNames().size());
// Drop all
store.dropAllSoups();
assertEquals("No soup name expected", 0, store.getAllSoupNames().size());
assertFalse("Soup " + THIRD_TEST_SOUP + " should no longer exist", store.hasSoup(THIRD_TEST_SOUP));
assertFalse("Soup " + TEST_SOUP + " should no longer exist", store.hasSoup(TEST_SOUP));
}
/**
* Testing create: create a single element with a single index pointing to a top level attribute
* @throws JSONException
*/
public void testCreateOne() throws JSONException {
JSONObject soupElt = new JSONObject("{'key':'ka', 'value':'va'}");
JSONObject soupEltCreated = store.create(TEST_SOUP, soupElt);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, null, null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected one soup element only", 1, c.getCount());
assertEquals("Wrong id", idOf(soupEltCreated), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupEltCreated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Wrong value in index column", "ka", c.getString(c.getColumnIndex(soupTableName + "_0")));
JSONTestHelper.assertSameJSON("Wrong value in soup column", soupEltCreated, new JSONObject(c.getString(c.getColumnIndex("soup"))));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing create: create multiple elements with multiple indices not just pointing to top level attributes
* @throws JSONException
*/
public void testCreateMultiple() throws JSONException {
assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("lastName", Type.string), new IndexSpec("address.city", Type.string)});
assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'lastName':'Doe', 'address':{'city':'San Francisco','street':'1 market'}}");
JSONObject soupElt2 = new JSONObject("{'lastName':'Jackson', 'address':{'city':'Los Angeles','street':'100 mission'}}");
JSONObject soupElt3 = new JSONObject("{'lastName':'Watson', 'address':{'city':'London','street':'50 market'}}");
JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3);
// Check DB
Cursor c = null;
try {
String soupTableName = getSoupTableName(OTHER_TEST_SOUP);
assertEquals("Table for other_test_soup was expected to be called TABLE_2", "TABLE_2", soupTableName);
assertTrue("Table for other_test_soup should now exist", hasTable("TABLE_2"));
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected three soup elements", 3, c.getCount());
assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Wrong value in index column", "Doe", c.getString(c.getColumnIndex(soupTableName + "_0")));
assertEquals("Wrong value in index column", "San Francisco", c.getString(c.getColumnIndex(soupTableName + "_1")));
JSONTestHelper.assertSameJSON("Wrong value in soup column", soupElt1Created, new JSONObject(c.getString(c.getColumnIndex("soup"))));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt2Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Wrong value in index column", "Jackson", c.getString(c.getColumnIndex(soupTableName + "_0")));
assertEquals("Wrong value in index column", "Los Angeles", c.getString(c.getColumnIndex(soupTableName + "_1")));
JSONTestHelper.assertSameJSON("Wrong value in soup column", soupElt2Created, new JSONObject(c.getString(c.getColumnIndex("soup"))));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Wrong value in index column", "Watson", c.getString(c.getColumnIndex(soupTableName + "_0")));
assertEquals("Wrong value in index column", "London", c.getString(c.getColumnIndex(soupTableName + "_1")));
JSONTestHelper.assertSameJSON("Wrong value in soup column", soupElt3Created, new JSONObject(c.getString(c.getColumnIndex("soup"))));
}
finally {
safeClose(c);
}
}
/**
* Testing update: create multiple soup elements and update one of them, check them all
* @throws JSONException
*/
public void testUpdate() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u'}");
JSONObject soupElt2Updated = store.update(TEST_SOUP, soupElt2ForUpdate, idOf(soupElt2Created));
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected three soup elements", 3, c.getCount());
assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert: upsert multiple soup elements and re-upsert one of them, check them all
* @throws JSONException
*/
public void testUpsert() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1);
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2);
JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3);
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u', '_soupEntryId': " + idOf(soupElt2Upserted) + "}");
JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected three soup elements", 3, c.getCount());
assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert with external id: upsert multiple soup elements and re-upsert one of them, check them all
* @throws JSONException
*/
public void testUpsertWithExternalId() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1, "key");
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2, "key");
JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3, "key");
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2', 'value':'va2u'}");
JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate, "key");
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected three soup elements", 3, c.getCount());
assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id")));
assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert passing a non-indexed path for the external id (should fail)
* @throws JSONException
*/
public void testUpsertWithNonIndexedExternalId() throws JSONException {
JSONObject soupElt = new JSONObject("{'key':'ka1', 'value':'va1'}");
try {
store.upsert(TEST_SOUP, soupElt, "value");
fail("Exception was expected: value is not an indexed field");
} catch (RuntimeException e) {
assertTrue("Wrong exception", e.getMessage().contains("does not have an index"));
}
}
/**
* Testing upsert with an external id that is not unique in the soup
* @throws JSONException
*/
public void testUpsertWithNonUniqueExternalId() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1);
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Upserted, soupElt2Retrieved);
try {
store.upsert(TEST_SOUP, soupElt3, "key");
fail("Exception was expected: key is not unique in the soup");
} catch (RuntimeException e) {
assertTrue("Wrong exception", e.getMessage().contains("are more than one soup elements"));
}
}
/**
* Testing retrieve: create multiple soup elements and retrieve them back
* @throws JSONException
*/
public void testRetrieve() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Created, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
}
/**
* Testing delete: create soup elements, delete one of them and check the database directly to verify it is gone
* @throws JSONException
*/
public void testDelete() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
store.delete(TEST_SOUP, idOf(soupElt2Created));
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created));
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
assertEquals("Should be empty", 0, soupElt2Retrieved.length());
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected three soup elements", 2, c.getCount());
assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
c.moveToNext();
assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
} finally {
safeClose(c);
}
}
/**
* Testing clear soup: create soup elements, clear the soup and check the database directly to verify they are in fact gone
* @throws JSONException
*/
public void testClearSoup() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
store.clearSoup(TEST_SOUP);
JSONArray soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created));
JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created));
JSONArray soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created));
assertEquals("Should be empty", 0, soupElt1Retrieved.length());
assertEquals("Should be empty", 0, soupElt2Retrieved.length());
assertEquals("Should be empty", 0, soupElt3Retrieved.length());
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
assertFalse("Expected no soup element", c.moveToFirst());
} finally {
safeClose(c);
}
}
/**
* Test query when looking for all elements
* @throws JSONException
*/
public void testAllQuery() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
// Query all - small page
JSONArray result = store.query(QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 2), 0);
assertEquals("Two elements expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(1));
// Query all - next small page
result = store.query(QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 2), 1);
assertEquals("One element expected", 1, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(0));
// Query all - large page
result = store.query(QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 10), 0);
assertEquals("Three elements expected", 3, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(1));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(2));
}
/**
* Test query when looking for a specific element
* @throws JSONException
*/
public void testMatchQuery() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
store.create(TEST_SOUP, soupElt3);
// Exact match
JSONArray result = store.query(QuerySpec.buildExactQuerySpec(TEST_SOUP, "key", "ka2", null, null, 10), 0);
assertEquals("One result expected", 1, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(0));
}
/**
* Query test looking for a range of elements (with ascending or descending ordering)
* @throws JSONException
*/
public void testRangeQuery() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
/* JSONObject soupElt1Created = */ store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
// Range query
JSONArray result = store.query(QuerySpec.buildRangeQuerySpec(TEST_SOUP, "key", "ka2", "ka3", "key", Order.ascending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(1));
// Range query - descending order
result = store.query(QuerySpec.buildRangeQuerySpec(TEST_SOUP, "key", "ka2", "ka3", "key", Order.descending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(1));
}
/**
* Query test using like (with ascending or descending ordering)
* @throws JSONException
*/
public void testLikeQuery() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'abcd', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'bbcd', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'abcc', 'value':'va3', 'otherValue':'ova3'}");
JSONObject soupElt4 = new JSONObject("{'key':'defg', 'value':'va4', 'otherValue':'ova3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
/* JSONObject soupElt4Created = */ store.create(TEST_SOUP, soupElt4);
// Like query (starts with)
JSONArray result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "abc%", "key", Order.ascending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(1));
// Like query (ends with)
result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "%bcd", "key", Order.ascending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(1));
// Like query (starts with) - descending order
result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "abc%", "key", Order.descending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(1));
// Like query (ends with) - descending order
result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "%bcd", "key", Order.descending, 10), 0);
assertEquals("Two results expected", 2, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(1));
// Like query (contains)
result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "%bc%", "key", Order.ascending, 10), 0);
assertEquals("Three results expected", 3, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(1));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(2));
// Like query (contains) - descending order
result = store.query(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "key", "%bc%", "key", Order.descending, 10), 0);
assertEquals("Three results expected", 3, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt2Created, result.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt1Created, result.getJSONObject(1));
JSONTestHelper.assertSameJSON("Wrong result for query", soupElt3Created, result.getJSONObject(2));
}
/**
* Test upsert soup element with null value in indexed field
* @throws JSONException
*/
public void testUpsertWithNullInIndexedField() throws JSONException {
// Before
assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
store.registerSoup(THIRD_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string), new IndexSpec("value", Type.string)});
assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
// Upsert
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':null}");
JSONObject soupElt1Upserted = store.upsert(THIRD_TEST_SOUP, soupElt1);
// Check
JSONObject soupElt1Retrieved = store.retrieve(THIRD_TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
}
/**
* Test to verify an aggregate query on floating point values.
*
* @throws JSONException
*/
public void testAggregateQueryOnIndexedField() throws JSONException {
final JSONObject soupElt1 = new JSONObject("{'amount':10.2}");
final JSONObject soupElt2 = new JSONObject("{'amount':9.9}");
final IndexSpec[] indexSpecs = { new IndexSpec("amount", Type.floating) };
store.registerSoup(FOURTH_TEST_SOUP, indexSpecs);
assertTrue("Soup " + FOURTH_TEST_SOUP + " should have been created", store.hasSoup(FOURTH_TEST_SOUP));
store.upsert(FOURTH_TEST_SOUP, soupElt1);
store.upsert(FOURTH_TEST_SOUP, soupElt2);
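// Smart SQL placeholders of the form {soupName:path} are rewritten by
// SmartStore into the backing table and column names at query time.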
final String smartSql = "SELECT SUM({" + FOURTH_TEST_SOUP + ":amount}) FROM {" + FOURTH_TEST_SOUP + "}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 1);
final JSONArray result = store.query(querySpec, 0);
assertNotNull("Result should not be null", result);
assertEquals("One result expected", 1, result.length());
assertEquals("Incorrect result received", 20.1, result.getJSONArray(0).getDouble(0));
store.dropSoup(FOURTH_TEST_SOUP);
assertFalse("Soup " + FOURTH_TEST_SOUP + " should have been deleted", store.hasSoup(FOURTH_TEST_SOUP));
}
/**
* Test to verify a count query for a query with group by.
*
* @throws JSONException
*/
public void testCountQueryWithGroupBy() throws JSONException {
// Before
assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
store.registerSoup(THIRD_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string), new IndexSpec("value", Type.string)});
assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'a', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'b', 'value':'va1'}");
JSONObject soupElt3 = new JSONObject("{'key':'c', 'value':'va2'}");
JSONObject soupElt4 = new JSONObject("{'key':'d', 'value':'va3'}");
JSONObject soupElt5 = new JSONObject("{'key':'e', 'value':'va3'}");
store.create(THIRD_TEST_SOUP, soupElt1);
store.create(THIRD_TEST_SOUP, soupElt2);
store.create(THIRD_TEST_SOUP, soupElt3);
store.create(THIRD_TEST_SOUP, soupElt4);
store.create(THIRD_TEST_SOUP, soupElt5);
final String smartSql = "SELECT {" + THIRD_TEST_SOUP + ":value}, count(*) FROM {" + THIRD_TEST_SOUP + "} GROUP BY {" + THIRD_TEST_SOUP + ":value} ORDER BY {" + THIRD_TEST_SOUP + ":value}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25);
final JSONArray result = store.query(querySpec, 0);
assertNotNull("Result should not be null", result);
assertEquals("Three results expected", 3, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", new JSONArray("[['va1', 2], ['va2', 1], ['va3', 2]]"), result);
final int count = store.countQuery(querySpec);
assertEquals("Incorrect count query", "SELECT count(*) FROM (" + smartSql + ")", querySpec.countSmartSql);
assertEquals("Incorrect count", 3, count);
}
/**
* Test to verify proper indexing of integers and longs
*/
public void testIntegerIndexedField() throws JSONException {
store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) });
tryNumber(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumber(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumber(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumber(Type.integer, Long.MAX_VALUE, Long.MAX_VALUE);
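// Note: (long) Double.MIN_VALUE truncates to 0 and (long) Double.MAX_VALUE
// saturates to Long.MAX_VALUE, which is what the expected values below encode.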
tryNumber(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE);
tryNumber(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE);
}
/**
* Test to verify proper indexing of doubles
*/
public void testFloatingIndexedField() throws JSONException {
store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) });
tryNumber(Type.floating, Integer.MIN_VALUE, (double) Integer.MIN_VALUE);
tryNumber(Type.floating, Integer.MAX_VALUE, (double) Integer.MAX_VALUE);
tryNumber(Type.floating, Long.MIN_VALUE, (double) Long.MIN_VALUE);
tryNumber(Type.floating, Long.MAX_VALUE, (double) Long.MAX_VALUE);
tryNumber(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE);
tryNumber(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE);
}
/**
* Helper method for testIntegerIndexedField and testFloatingIndexedField
* Insert soup element with number and check db
* @param fieldType index type under test (integer or floating)
* @param valueIn number stored in the soup element
* @param valueOut number expected back in the index column
* @throws JSONException
*/
private void tryNumber(Type fieldType, Number valueIn, Number valueOut) throws JSONException {
JSONObject elt = new JSONObject();
elt.put("amount", valueIn);
Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID);
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getPasscode());
String soupTableName = getSoupTableName(FOURTH_TEST_SOUP);
String amountColumnName = store.getSoupIndexSpecs(FOURTH_TEST_SOUP)[0].columnName;
c = DBHelper.getInstance(db).query(db, soupTableName, new String[] { amountColumnName }, null, null, "id = " + id);
assertTrue("Expected a soup element", c.moveToFirst());
assertEquals("Expected one soup element", 1, c.getCount());
if (fieldType == Type.integer)
assertEquals("Not the value expected", valueOut.longValue(), c.getLong(0));
else if (fieldType == Type.floating)
assertEquals("Not the value expected", valueOut.doubleValue(), c.getDouble(0));
} finally {
safeClose(c);
}
}
/**
* Test using smart sql to retrieve integer indexed fields
*/
public void testIntegerIndexedFieldWithSmartSql() throws JSONException {
store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) });
tryNumberWithSmartSql(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumberWithSmartSql(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE);
}
/**
* Test using smart sql to retrieve indexed fields holding doubles
* NB smart sql will return a long when querying a double field that contains a long
*/
public void testFloatingIndexedFieldWithSmartSql() throws JSONException {
store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) });
tryNumberWithSmartSql(Type.floating, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumberWithSmartSql(Type.floating, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumberWithSmartSql(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE);
}
/**
* Helper method for testIntegerIndexedFieldWithSmartSql and testFloatingIndexedFieldWithSmartSql
* Insert soup element with number and retrieve it back using smartsql
* @param fieldType index type under test (integer or floating)
* @param valueIn number stored in the soup element
* @param valueOut number expected back from the smart sql query
* @throws JSONException
*/
private void tryNumberWithSmartSql(Type fieldType, Number valueIn, Number valueOut) throws JSONException {
String smartSql = "SELECT {" + FOURTH_TEST_SOUP + ":amount} FROM {" + FOURTH_TEST_SOUP + "} WHERE {" + FOURTH_TEST_SOUP + ":_soupEntryId} = ";
JSONObject elt = new JSONObject();
elt.put("amount", valueIn);
Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID);
Number actualValueOut = (Number) store.query(QuerySpec.buildSmartQuerySpec(smartSql + id, 1), 0).getJSONArray(0).get(0);
if (fieldType == Type.integer)
assertEquals("Not the value expected", valueOut.longValue(), actualValueOut.longValue());
else if (fieldType == Type.floating)
assertEquals("Not the value expected", valueOut.doubleValue(), actualValueOut.doubleValue());
}
/**
* Test for getDatabaseSize
*
* @throws JSONException
*/
public void testGetDatabaseSize() throws JSONException {
int initialSize = store.getDatabaseSize();
for (int i = 0; i < 100; i++) {
JSONObject soupElt = new JSONObject("{'key':'abcd" + i + "', 'value':'va" + i + "', 'otherValue':'ova" + i + "'}");
store.create(TEST_SOUP, soupElt);
}
assertTrue("Database should be larger now", store.getDatabaseSize() > initialSize);
}
}
| 1 | 15,211 | Here is the test that checks that the sqlcipher in use was compiled with the right flags | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -199,6 +199,8 @@ public class LoginServerManager {
} catch (Exception e) {
Log.w("LoginServerManager.getLoginServersFromRuntimeConfig",
"Exception thrown while attempting to read array, attempting to read string value instead");
+ }
+ if (mdmLoginServers == null) {
final String loginServer = runtimeConfig.getString(ConfigKey.AppServiceHosts);
if (!TextUtils.isEmpty(loginServer)) {
mdmLoginServers = new String[] {loginServer}; | 1 | /*
* Copyright (c) 2014, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.config;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.content.res.XmlResourceParser;
import android.text.TextUtils;
import android.util.Log;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.config.RuntimeConfig.ConfigKey;
import com.salesforce.androidsdk.ui.SalesforceR;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Class to manage login hosts (default and user entered).
*
* @author bhariharan
*/
public class LoginServerManager {
// Default login servers.
public static final String PRODUCTION_LOGIN_URL = "https://login.salesforce.com";
public static final String SANDBOX_LOGIN_URL = "https://test.salesforce.com";
// Legacy key for login server properties stored in preferences.
public static final String LEGACY_SERVER_URL_PREFS_SETTINGS = "server_url_prefs";
// Keys used in shared preferences.
private static final String SERVER_URL_FILE = "server_url_file";
private static final String NUMBER_OF_ENTRIES = "number_of_entries";
private static final String SERVER_NAME = "server_name_%d";
private static final String SERVER_URL = "server_url_%d";
private static final String IS_CUSTOM = "is_custom_%d";
private Context ctx;
private LoginServer selectedServer;
private SharedPreferences settings;
/**
* Parameterized constructor.
*
* @param ctx Context.
*/
public LoginServerManager(Context ctx) {
this.ctx = ctx;
settings = ctx.getSharedPreferences(SERVER_URL_FILE,
Context.MODE_PRIVATE);
initSharedPrefFile();
final List<LoginServer> allServers = getLoginServers();
selectedServer = new LoginServer("Production", PRODUCTION_LOGIN_URL, false);
if (allServers != null) {
final LoginServer server = allServers.get(0);
if (server != null) {
selectedServer = server;
}
}
}
/**
* Returns a LoginServer instance from URL.
*
* @param url Server URL.
* @return Matching LoginServer instance if found, or null.
*/
public LoginServer getLoginServerFromURL(String url) {
if (url == null) {
return null;
}
final List<LoginServer> allServers = getLoginServers();
if (allServers != null) {
for (final LoginServer server : allServers) {
if (server != null && url.equals(server.url)) {
return server;
}
}
}
return null;
}
/**
* Returns the selected login server to display.
*
* @return LoginServer instance.
*/
public LoginServer getSelectedLoginServer() {
return selectedServer;
}
/**
* Sets the currently selected login server to display.
*
* @param server LoginServer instance.
*/
public void setSelectedLoginServer(LoginServer server) {
if (server == null) {
return;
}
selectedServer = server;
}
/**
* Selects Sandbox as login server (used in tests).
*/
public void useSandbox() {
final LoginServer sandboxServer = getLoginServerFromURL(SANDBOX_LOGIN_URL);
setSelectedLoginServer(sandboxServer);
}
/**
* Adds a custom login server to the shared pref file.
*
* @param name Server name.
* @param url Server URL.
*/
public void addCustomLoginServer(String name, String url) {
if (name == null || url == null) {
return;
}
int numServers = settings.getInt(NUMBER_OF_ENTRIES, 0);
final Editor edit = settings.edit();
edit.putString(String.format(SERVER_NAME, numServers), name);
edit.putString(String.format(SERVER_URL, numServers), url);
edit.putBoolean(String.format(IS_CUSTOM, numServers), true);
edit.putInt(NUMBER_OF_ENTRIES, ++numServers);
edit.commit();
setSelectedLoginServer(new LoginServer(name, url, true));
}
/**
* Clears all saved custom servers.
*/
public void reset() {
final Editor edit = settings.edit();
edit.clear();
edit.commit();
initSharedPrefFile();
}
/**
* Returns the list of login servers.
* Checks runtime configuration first.
* Reads from preferences if no runtime configuration is found.
*
* @return List of login servers.
*/
public List<LoginServer> getLoginServers() {
List<LoginServer> allServers = getLoginServersFromRuntimeConfig();
if (allServers == null) {
allServers = getLoginServersFromPreferences();
}
return allServers;
}
/**
* Returns the list of login servers from runtime configuration
* (from MDM provider), if any.
*
* @return List of login servers or null.
*/
public List<LoginServer> getLoginServersFromRuntimeConfig() {
RuntimeConfig runtimeConfig = RuntimeConfig.getRuntimeConfig(ctx);
String[] mdmLoginServers = null;
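// MDM providers may deliver AppServiceHosts either as a string array or as a
// single string, so a string read is attempted when the array read fails.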
try {
mdmLoginServers = runtimeConfig.getStringArray(ConfigKey.AppServiceHosts);
} catch (Exception e) {
Log.w("LoginServerManager.getLoginServersFromRuntimeConfig",
"Exception thrown while attempting to read array, attempting to read string value instead");
final String loginServer = runtimeConfig.getString(ConfigKey.AppServiceHosts);
if (!TextUtils.isEmpty(loginServer)) {
mdmLoginServers = new String[] {loginServer};
}
}
final List<LoginServer> allServers = new ArrayList<LoginServer>();
if (mdmLoginServers != null) {
String[] mdmLoginServersLabels = null;
try {
mdmLoginServersLabels = runtimeConfig.getStringArray(ConfigKey.AppServiceHostLabels);
} catch (Exception e) {
Log.w("LoginServerManager.getLoginServersFromRuntimeConfig",
"Exception thrown while attempting to read array, attempting to read string value instead");
final String loginServerLabel = runtimeConfig.getString(ConfigKey.AppServiceHostLabels);
if (!TextUtils.isEmpty(loginServerLabel)) {
mdmLoginServersLabels = new String[] {loginServerLabel};
}
}
if (mdmLoginServersLabels == null || mdmLoginServersLabels.length != mdmLoginServers.length) {
Log.w("LoginServerManager.getLoginServersFromRuntimeConfig",
"No login servers labels provided or wrong number of login servers labels provided - Using URLs for the labels");
mdmLoginServersLabels = mdmLoginServers;
}
for (int i = 0; i < mdmLoginServers.length; i++) {
String name = mdmLoginServersLabels[i];
String url = mdmLoginServers[i];
final LoginServer server = new LoginServer(name, url, false);
allServers.add(server);
}
}
return (allServers.size() > 0 ? allServers : null);
}
/**
* Returns the list of all saved servers, including custom servers.
*
* @return List of all saved servers.
*/
public List<LoginServer> getLoginServersFromPreferences() {
int numServers = settings.getInt(NUMBER_OF_ENTRIES, 0);
if (numServers == 0) {
return null;
}
final List<LoginServer> allServers = new ArrayList<LoginServer>();
for (int i = 0; i < numServers; i++) {
final String name = settings.getString(String.format(SERVER_NAME, i), null);
final String url = settings.getString(String.format(SERVER_URL, i), null);
boolean isCustom = settings.getBoolean(String.format(IS_CUSTOM, i), false);
if (name != null && url != null) {
final LoginServer server = new LoginServer(name, url, isCustom);
allServers.add(server);
}
}
return (allServers.size() > 0 ? allServers : null);
}
/**
* Returns production and sandbox as the login servers
* (only called when servers.xml is missing).
*/
private List<LoginServer> getLegacyLoginServers() {
final SalesforceR salesforceR = SalesforceSDKManager.getInstance().getSalesforceR();
final List<LoginServer> loginServers = new ArrayList<LoginServer>();
final LoginServer productionServer = new LoginServer(ctx.getString(salesforceR.stringAuthLoginProduction()),
PRODUCTION_LOGIN_URL, false);
loginServers.add(productionServer);
final LoginServer sandboxServer = new LoginServer(ctx.getString(salesforceR.stringAuthLoginSandbox()),
SANDBOX_LOGIN_URL, false);
loginServers.add(sandboxServer);
return loginServers;
}
/**
* Returns the list of login servers from XML.
*
* @return Login servers defined in 'res/xml/servers.xml', or null.
*/
private List<LoginServer> getLoginServersFromXML() {
List<LoginServer> loginServers = null;
int id = ctx.getResources().getIdentifier("servers", "xml", ctx.getPackageName());
if (id != 0) {
loginServers = new ArrayList<LoginServer>();
final XmlResourceParser xml = ctx.getResources().getXml(id);
int eventType = -1;
while (eventType != XmlResourceParser.END_DOCUMENT) {
if (eventType == XmlResourceParser.START_TAG) {
if (xml.getName().equals("server")) {
String name = xml.getAttributeValue(null, "name");
String url = xml.getAttributeValue(null, "url");
final LoginServer loginServer = new LoginServer(name,
url, false);
loginServers.add(loginServer);
}
}
try {
eventType = xml.next();
} catch (XmlPullParserException e) {
Log.w("LoginServerManager:getLoginServersFromXml", e);
} catch (IOException e) {
Log.w("LoginServerManager:getLoginServersFromXml", e);
}
}
}
return loginServers;
}
/**
* Initializes the shared pref file with all available servers for
* the first time, if necessary. This is required primarily for the
* first time a user is upgrading to a newer version of the Mobile SDK.
*/
private void initSharedPrefFile() {
final Map<String, ?> values = settings.getAll();
if (values != null && !values.isEmpty()) {
return;
}
List<LoginServer> servers = getLoginServersFromXML();
if (servers == null || servers.isEmpty()) {
servers = getLegacyLoginServers();
}
int numServers = servers.size();
final Editor edit = settings.edit();
for (int i = 0; i < numServers; i++) {
final LoginServer curServer = servers.get(i);
edit.putString(String.format(SERVER_NAME, i), curServer.name);
edit.putString(String.format(SERVER_URL, i), curServer.url);
edit.putBoolean(String.format(IS_CUSTOM, i), curServer.isCustom);
if (i == 0) {
setSelectedLoginServer(curServer);
}
}
edit.putInt(NUMBER_OF_ENTRIES, numServers);
edit.commit();
}
/**
* Class to encapsulate a login server name, URL and type (custom or not).
*/
public static class LoginServer {
public final String name;
public final String url;
public final boolean isCustom;
/**
* Parameterized constructor.
*
* @param name Server name.
* @param url Server URL.
* @param isCustom True - if custom URL, False - otherwise.
*/
public LoginServer(String name, String url, boolean isCustom) {
this.name = name;
this.url = url;
this.isCustom = isCustom;
}
@Override
public String toString() {
return "Name: " + name + ", URL: " + url + ", Custom URL: " + isCustom;
}
}
}
| 1 | 14,776 | Turns out that if the hosts are not in an `array`, an `Exception` is not thrown anymore :-( It simply returns `null`. So, we need to attempt to parse a `string` outside the `catch` block for it to work. | forcedotcom-SalesforceMobileSDK-Android | java |
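A minimal sketch of the restructured lookup the comment above describes, assuming the same RuntimeConfig API used in the file: the string fallback runs whenever the array read leaves mdmLoginServers null, whether because an exception was thrown or because null was silently returned.
String[] mdmLoginServers = null;
try {
mdmLoginServers = runtimeConfig.getStringArray(ConfigKey.AppServiceHosts);
} catch (Exception e) {
Log.w("LoginServerManager.getLoginServersFromRuntimeConfig",
"Exception thrown while attempting to read array, attempting to read string value instead");
}
if (mdmLoginServers == null) {
// Covers both the exception case and the silent null return.
final String loginServer = runtimeConfig.getString(ConfigKey.AppServiceHosts);
if (!TextUtils.isEmpty(loginServer)) {
mdmLoginServers = new String[] {loginServer};
}
}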