diff --git a/.gitignore b/.gitignore index d6006cd3c..6b487ce1c 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,5 @@ prepare-*response.log nitro-shim/tools/eifbuild/third_party/** nitro-image.eif bat-go-repro.tar + +/build/ diff --git a/Dockerfile b/Dockerfile index 82f1bb519..524fd0746 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,14 @@ +FROM alpine:3.18 as sources + +RUN chown nobody:nobody /srv + +USER nobody + +COPY --chown=nobody:nobody . /srv/repo +RUN mkdir /srv/mod-files && cd /srv/repo && rm -rf .git \ + && find . -name go.\* | xargs tar cf - | tar -C /srv/mod-files -xf - \ + && find . -name go.\* -delete + FROM golang:1.22-alpine as builder # Put certs in builder image. @@ -5,18 +16,23 @@ RUN apk update RUN apk add -U --no-cache ca-certificates && update-ca-certificates RUN apk add make build-base git bash -ARG VERSION -ARG BUILD_TIME -ARG COMMIT - WORKDIR /src -COPY . ./ -RUN chown -R nobody:nobody /src/ && mkdir /.cache && chown -R nobody:nobody /.cache +RUN chown -R nobody:nobody /src && mkdir /.cache && chown -R nobody:nobody /.cache USER nobody -RUN cd main && go mod download && CGO_ENABLED=0 GOOS=linux go build \ +COPY --from=sources --link /srv/mod-files/ ./ + +RUN cd main && go mod download + +COPY --from=sources --link /srv/repo ./ + +ARG VERSION +ARG BUILD_TIME +ARG COMMIT + +RUN cd main && CGO_ENABLED=0 GOOS=linux go build \ -ldflags "-w -s -X main.version=${VERSION} -X main.buildTime=${BUILD_TIME} -X main.commit=${COMMIT}" \ -o bat-go main.go diff --git a/Makefile b/Makefile index d5f551f71..cb88938b3 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ ifdef TEST_RUN TEST_FLAGS = --tags=$(TEST_TAGS) $(TEST_PKG) --run=$(TEST_RUN) endif -.PHONY: all buildcmd docker test create-json-schema lint clean download-mod pcrs pcrs-only nitro-shim/tools/eifbuild/eifbuild +.PHONY: all buildcmd docker docker-local test create-json-schema lint clean download-mod pcrs pcrs-only nitro-shim/tools/eifbuild/eifbuild all: test create-json-schema buildcmd @@ -27,6 +27,15 @@ codeql: download-mod buildcmd buildcmd: cd main && CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -ldflags "-w -s -X main.version=${GIT_VERSION} -X main.buildTime=${BUILD_TIME} -X main.commit=${GIT_COMMIT}" -o ${OUTPUT}/bat-go main.go +# Create a development build for local testing. As the locally running +# containers monitor the executable and restart on change, build first to a +# temporary location and then move it so that the executable is always valid.
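+# Typical local flow (as wired up in payment-test/docker-compose.yml): run +# "make builddev" and the worker and service containers pick up the rebuilt +# build/bat-go via payment-test/scripts/run-until-rebuild.sh.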
+builddev: + test -d build || mkdir build + rm -f build/bat-go.tmp + cd main && go build -o ../build/bat-go.tmp main.go + mv build/bat-go.tmp build/bat-go + mock: cd services && mockgen -source=./promotion/claim.go -destination=promotion/mockclaim.go -package=promotion cd services && mockgen -source=./promotion/drain.go -destination=promotion/mockdrain.go -package=promotion diff --git a/libs/nitro/attestation.go b/libs/nitro/attestation.go index b484577bb..614906fc5 100644 --- a/libs/nitro/attestation.go +++ b/libs/nitro/attestation.go @@ -63,6 +63,9 @@ func init() { if err != nil { panic(err) } + if EnclaveMocking() { + ExpectedPCR1 = mockTestPCR(1) + } } func ParsePCRHex(pcrHex string) (PCRBytes, error) { @@ -93,6 +96,10 @@ func hashMessage(message []byte) []byte { func Attest(ctx context.Context, nonce, message, publicKey []byte) ([]byte, error) { messageHash := hashMessage(message) + if EnclaveMocking() { + return messageHash, nil + } + var logger = logging.Logger(ctx, "nitro.Attest") s, err := nsm.OpenDefaultSession() if err != nil { @@ -150,24 +157,34 @@ func verifySigOnlyNotPCRs( sig []byte, verifyTime time.Time, ) (bool, PCRMap, error) { - pool := x509.NewCertPool() - ok := pool.AppendCertsFromPEM([]byte(RootAWSNitroCert)) - if !ok { - return false, nil, errors.New("could not create a valid root cert pool") - } - - res, err := nitrite.Verify( - sig, - nitrite.VerifyOptions{ - Roots: pool, - CurrentTime: verifyTime, - }, - ) - if nil != err { - return false, nil, err - } - expectedHash := res.Document.UserData - pcrs := res.Document.PCRs + var expectedHash []byte + var pcrs PCRMap + if EnclaveMocking() { + pcrs = make(PCRMap) + for i := 0; i < 4; i++ { + pcrs[uint(i)] = mockTestPCR(i) + } + expectedHash = sig + } else { + pool := x509.NewCertPool() + ok := pool.AppendCertsFromPEM([]byte(RootAWSNitroCert)) + if !ok { + return false, nil, errors.New("could not create a valid root cert pool") + } + + res, err := nitrite.Verify( + sig, + nitrite.VerifyOptions{ + Roots: pool, + CurrentTime: verifyTime, + }, + ) + if nil != err { + return false, nil, err + } + expectedHash = res.Document.UserData + pcrs = res.Document.PCRs + } if len(pcrs) == 0 { return false, nil, errors.New("failed to get PCR for the Nitro-signed document") } diff --git a/libs/nitro/aws/sdk.go b/libs/nitro/aws/sdk.go index 9afc7bca5..f500762d2 100644 --- a/libs/nitro/aws/sdk.go +++ b/libs/nitro/aws/sdk.go @@ -114,8 +114,7 @@ func NewAWSConfig(ctx context.Context, proxyAddr string, region string) (aws.Con Str("region", region). 
Msg("setting up new aws config") - var client http.Client - tr := nitro.NewProxyRoundTripper(ctx, proxyAddr).(*http.Transport) + tr := nitro.NewProxyTransport(ctx, proxyAddr) certs := x509.NewCertPool() certs.AppendCertsFromPEM([]byte(amazonRoots)) @@ -132,7 +131,7 @@ func NewAWSConfig(ctx context.Context, proxyAddr string, region string) (aws.Con return aws.Config{}, fmt.Errorf("failed to configure transport for HTTP/2, %v", err) } - client = http.Client{ + client := &http.Client{ Transport: tr, } @@ -143,7 +142,7 @@ func NewAWSConfig(ctx context.Context, proxyAddr string, region string) (aws.Con } cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithHTTPClient(&client), + config.WithHTTPClient(client), config.WithRegion("us-west-2"), config.WithLogger(applicationLogger), ) @@ -151,14 +150,20 @@ func NewAWSConfig(ctx context.Context, proxyAddr string, region string) (aws.Con return aws.Config{}, fmt.Errorf("unable to load SDK config, %v", err) } - provider := ec2rolecreds.New(func(options *ec2rolecreds.Options) { - options.Client = imds.NewFromConfig(cfg) - }) - - return config.LoadDefaultConfig(context.TODO(), - config.WithHTTPClient(&client), + configOptions := []func(*config.LoadOptions) error{ + config.WithHTTPClient(client), config.WithRegion(region), - config.WithCredentialsProvider(provider), config.WithLogger(applicationLogger), - ) + } + if !nitro.EnclaveMocking() { + provider := ec2rolecreds.New(func(options *ec2rolecreds.Options) { + options.Client = imds.NewFromConfig(cfg) + }) + configOptions = append( + configOptions, + config.WithCredentialsProvider(provider), + ) + } + + return config.LoadDefaultConfig(context.TODO(), configOptions...) } diff --git a/libs/nitro/log.go b/libs/nitro/log.go index 8a58ccae0..635cc8a5e 100644 --- a/libs/nitro/log.go +++ b/libs/nitro/log.go @@ -2,6 +2,7 @@ package nitro import ( "context" + "io" "net" "os" @@ -17,7 +18,10 @@ type VsockWriter struct { } // NewVsockWriter - create a new vsock writer -func NewVsockWriter(addr string) *VsockWriter { +func NewVsockWriter(addr string) io.Writer { + if EnclaveMocking() { + return os.Stderr + } return &VsockWriter{ socket: nil, addr: addr, @@ -27,7 +31,7 @@ func NewVsockWriter(addr string) *VsockWriter { // Connect - interface implementation for connect method for VsockWriter func (w *VsockWriter) Connect() error { if w.socket == nil { - s, err := DialContext(context.Background(), "tcp", w.addr) + s, err := dialVsockContext(context.Background(), "tcp", w.addr) if err != nil { return err } diff --git a/libs/nitro/mocking.go b/libs/nitro/mocking.go new file mode 100644 index 000000000..bdd6a550a --- /dev/null +++ b/libs/nitro/mocking.go @@ -0,0 +1,37 @@ +package nitro + +import ( + "encoding/hex" + "fmt" + "os" + "strings" +) + +var enclaveMocking = os.Getenv("NITRO_ENCLAVE_MOCKING") != "" + +func EnclaveMocking() bool { + return enclaveMocking +} + +func mockTestPCR(pcrIndex int) []byte { + // Create arbitrary but easily recognizable values in hex + if pcrIndex < 0 || pcrIndex > 8 { + panic("Invalid mocking PCR index") + } + testHex := fmt.Sprintf("abc%d", pcrIndex) + testHex = strings.Repeat(testHex, PCRByteLength*2/4) + pcr, err := hex.DecodeString(testHex) + if err != nil { + panic(err) + } + return pcr +} + +func ReadMockingSecretsFile(fileName string) string { + dir := "payment-test/secrets/" + bytes, err := os.ReadFile(dir + fileName) + if err != nil { + panic(err) + } + return string(bytes) +} diff --git a/libs/nitro/vsock.go b/libs/nitro/vsock.go index 90880eb71..a2df0c463 100644 --- 
a/libs/nitro/vsock.go +++ b/libs/nitro/vsock.go @@ -55,12 +55,12 @@ func parseVsockAddr(addr string) (uint32, uint32, error) { } // DialContext is a net.Dial wrapper which additionally allows connecting to vsock networks -func DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - logger := logging.Logger(ctx, "nitro.DialContext") +func dialVsockContext(ctx context.Context, network, addr string) (net.Conn, error) { + logger := logging.Logger(ctx, "nitro.dialVsockContext") logger.Debug(). Str("network", fmt.Sprintf("%v", network)). Str("addr", fmt.Sprintf("%v", addr)). - Msg("DialContext") + Msg("dialVsockContext") cid, port, err := parseVsockAddr(addr) if err != nil { @@ -102,12 +102,16 @@ func (p *proxyClientConfig) Proxy(*http.Request) (*url.URL, error) { return v, err } -// NewProxyRoundTripper returns an http.RoundTripper which routes outgoing requests through the proxy addr -func NewProxyRoundTripper(ctx context.Context, addr string) http.RoundTripper { +// NewProxyTransport returns an http.Transport which routes outgoing requests +// through the proxy addr. +func NewProxyTransport(ctx context.Context, addr string) *http.Transport { + if enclaveMocking { + return &http.Transport{} + } config := proxyClientConfig{ctx, addr} return &http.Transport{ Proxy: config.Proxy, - DialContext: DialContext, + DialContext: dialVsockContext, } } @@ -122,7 +126,7 @@ func NewReverseProxyServer( } proxy := httputil.NewSingleHostReverseProxy(proxyURL) proxy.Transport = &http.Transport{ - DialContext: DialContext, + DialContext: dialVsockContext, } proxy.Director = func(req *http.Request) { req.Header.Add("X-Forwarded-Host", req.Host) @@ -240,3 +244,25 @@ func syncCopy(wg *sync.WaitGroup, dst io.WriteCloser, src io.ReadCloser) { defer wg.Done() _, _ = io.Copy(dst, src) } + +func Listen(ctx context.Context, address string) (net.Listener, error) { + if enclaveMocking { + l, err := net.Listen("tcp", address) + if err != nil { + return nil, fmt.Errorf("failed to listen to tcp address %v - %w", address, err) + } + return l, nil + } + + // TODO: share with parseVsockAddr + port, err := strconv.ParseUint(strings.Split(address, ":")[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse vsock address - %w", err) + } + + l, err := vsock.Listen(uint32(port), &vsock.Config{}) + if err != nil { + return nil, fmt.Errorf("failed to listen to vsock %v - %w", address, err) + } + return l, nil +} diff --git a/nitro-shim/scripts/start-proxies.sh b/nitro-shim/scripts/start-proxies.sh index 7383e0fa3..219e766a3 100755 --- a/nitro-shim/scripts/start-proxies.sh +++ b/nitro-shim/scripts/start-proxies.sh @@ -9,8 +9,8 @@ echo "cid is ${CID}" # it's now time to set up proxy tools if [ "${service}" = "/payments" ]; then # setup inbound traffic proxy - export IN_ADDRS=":8080" - export OUT_ADDRS="${CID}:8080" + export IN_ADDRS=":8080,:8443" + export OUT_ADDRS="${CID}:8080,${CID}:8443" echo "${IN_ADDRS} to ${OUT_ADDRS}" # next startup the proxy /enclave/viproxy > /tmp/viproxy.log & diff --git a/payment-test/.gitignore b/payment-test/.gitignore new file mode 100644 index 000000000..58d85f687 --- /dev/null +++ b/payment-test/.gitignore @@ -0,0 +1,5 @@ +# Local files with development secrets.
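+# The test operator keys are generated by scripts/ensure-secretes.sh, while +# payments-test.json has to be obtained separately.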
+/secrets/ + +# Home in the container +/container-home/ diff --git a/payment-test/docker-compose.yml b/payment-test/docker-compose.yml new file mode 100644 index 000000000..c498c6e0c --- /dev/null +++ b/payment-test/docker-compose.yml @@ -0,0 +1,133 @@ +services: + redis: + image: redis:7.0 + restart: always + #ports: + # - '6380:6380' + command: redis-server --save 20 1 --loglevel verbose --requirepass testpass --user redis --port 6380 + #volumes: + # - redis-cache:/data + +# Access to localstack: +# export AWS_DEFAULT_REGION=us-east-1 +# export AWS_SECRET_ACCESS_KEY=test +# export AWS_ACCESS_KEY_ID=test +# export AWS_ENDPOINT_URL=http://localhost:4566 + + localstack: + image: localstack/localstack + stop_grace_period: "1s" + ports: + - "127.0.0.1:4566:4566" # LocalStack Gateway + - "127.0.0.1:4510-4559:4510-4559" # external services port range + environment: + # LocalStack configuration: https://docs.localstack.cloud/references/configuration/ + - DEBUG=0 + #volumes: + #- "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack" + # Access to Docker is not necessary with S3. + # - "/var/run/docker.sock:/var/run/docker.sock" + + shell: + image: bat-go-payment-test + pull_policy: never + build: + context: . + dockerfile: local.dockerfile + depends_on: + - redis + - localstack + volumes: + - ..:/workspace + stop_grace_period: "1s" + # The shell uses localhost for redis by default, so use the redis container + # network for that to work. + network_mode: "service:redis" + command: >- + /workspace/payment-test/scripts/run-as-workspace-user.sh + /workspace/payment-test/scripts/start-shell.sh + working_dir: /workspace/tools/payments + stdin_open: true + tty: true + environment: + - NITRO_ENCLAVE_MOCKING=1 + - NITRO_API_BASE=http://service:18080 + - NITRO_EXPECTED_PCR2=abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2abc2 + - BAT_OPERATOR_KEY=/workspace/payment-test/secrets/test-operator.pem + - DEBUG=1 + - REDIS_ADDR=redis:6379 + - REDIS_USERNAME=default + - REDIS_PASSWORD=testpass + - AWS_REGION=${AWS_REGION-us-west-2} + - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-} + - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-} + - AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN-} + - AWS_CONTAINER_CREDENTIALS_FULL_URI=${AWS_CONTAINER_CREDENTIALS_FULL_URI-} + - AWS_CONTAINER_AUTHORIZATION_TOKEN=${AWS_CONTAINER_AUTHORIZATION_TOKEN-} + + worker: + image: bat-go-payment-test + pull_policy: never + depends_on: + - redis + - localstack + - shell + volumes: + - ..:/workspace + command: >- + /workspace/payment-test/scripts/run-as-workspace-user.sh + /workspace/payment-test/scripts/run-until-rebuild.sh + /workspace/build/bat-go serve payments worker + restart: unless-stopped + working_dir: /workspace + environment: + - NITRO_ENCLAVE_MOCKING=1 + - NITRO_API_BASE=http://service:18080 + - REDIS_ADDR=redis:6380 + - REDIS_USER=default + - REDIS_PASS=testpass + - DEBUG=1 + - ENVIRONMENT=development + + service: + image: bat-go-payment-test + pull_policy: never + depends_on: + - redis + - localstack + - shell + volumes: + - ..:/workspace + command: >- + /workspace/payment-test/scripts/run-as-workspace-user.sh + /workspace/payment-test/scripts/run-until-rebuild.sh + /workspace/build/bat-go serve nitro inside-enclave + --egress-address none --log-address none + --upstream-url http://0.0.0.0:8080 + restart: unless-stopped + working_dir: /workspace + environment: + - NITRO_ENCLAVE_MOCKING=1 + - BAT_PAYMENT_TEST_SECRETS=/workspace/payment-test/secrets/payments-test.json + - DEBUG=1 + - 
ADDR=0.0.0.0:18080 + - ADDR2=0.0.0.0:18443 + - AWS_REGION="us-west-2" + - QLDB_LEDGER_ARN=arn:aws:qldb:us-west-2:239563960694:ledger/testing-igor-payment + - QLDB_LEDGER_NAME=testing-igor-payment + - QLDB_ROLE_ARN=arn:aws:iam::239563960694:role/settlements-dev-sso-qldb-20240628161845683100000001 + - AWS_REGION=${AWS_REGION-us-west-2} + - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-} + - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-} + - AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN-} + - AWS_CONTAINER_CREDENTIALS_FULL_URI=${AWS_CONTAINER_CREDENTIALS_FULL_URI-} + - AWS_CONTAINER_AUTHORIZATION_TOKEN=${AWS_CONTAINER_AUTHORIZATION_TOKEN-} + + # For testing in the browser. + ports: + - "127.0.0.1:18080:18080" + - "127.0.0.1:18443:18443" + +#volumes: +# redis-cache: +# driver: local diff --git a/payment-test/local.dockerfile b/payment-test/local.dockerfile new file mode 100644 index 000000000..c06e59ef5 --- /dev/null +++ b/payment-test/local.dockerfile @@ -0,0 +1,24 @@ +FROM debian:bookworm + +ARG DEBIAN_FRONTEND=noninteractive +ARG GOLANG_VERSION=1.22.4 + +RUN apt-get update \ + && apt-get install -y -qq \ + tmux curl man less openssh-client openssl util-linux \ + python3 ipython3 python-is-python3 \ + socat lsof wget diffutils \ + git make \ + redis-tools \ + node-sshpk + +# Install Go +RUN set -x && curl -L -o /var/tmp/go.tgz \ + "https://go.dev/dl/go$GOLANG_VERSION.linux-amd64.tar.gz" \ + && tar -C /usr/local -xf /var/tmp/go.tgz \ + && rm /var/tmp/go.tgz \ + && find /usr/local/go/bin -type f -perm /001 \ + -exec ln -s -t /usr/local/bin '{}' + + +# Use arbitrary id, not 1000, to always test the code to sync the user id. +RUN useradd -m -u 12345 user diff --git a/payment-test/scripts/ensure-secretes.sh b/payment-test/scripts/ensure-secretes.sh new file mode 100755 index 000000000..75b8c97ae --- /dev/null +++ b/payment-test/scripts/ensure-secretes.sh @@ -0,0 +1,54 @@ +#!/bin/sh + +set -eu + +self="$(realpath "$0")" +secrets="${self%/*/*}/secrets" + +test -d "$secrets" || { + echo "Creating $secrets directory" >&2 + mkdir -m 0700 "$secrets" +} + +f="$secrets/payments-test.json" +test -s "$f" || { + echo "The file with configuration keys $f does not exist or is empty, please obtain it" + exit 1 +} + +ensureEd25519Key() { + local name pem pub x + name="$1" + + # We need to generate an ED25519 key in PEM format and its public key in + # OpenSSH format. Unfortunately only ssh-keygen versions released in 2024 + # or later support that, so use other tools. + + pem="$secrets/$name.pem" + test -s "$pem" || { + echo "ED25519 private key file $pem does not exist, generating it" >&2 + x="$(command -v openssl 2>/dev/null || :)" + test "$x" || { + echo "openssl tool does not exist, please install it. On a Debian-based system use:" >&2 + echo " apt install openssl" >&2 + exit 1 + } + rm -f "$pem.tmp" + openssl genpkey -algorithm ed25519 > "$pem.tmp" + mv "$pem.tmp" "$pem" + } + pub="${pem%.pem}.pub" + test -s "$pub" || { + echo "ED25519 public key file $pub does not exist, producing it from $pem" >&2 + x="$(command -v sshpk-conv 2>/dev/null || :)" + test "$x" || { + echo "sshpk-conv utility does not exist, please install it. 
On a Debian-based system use:" >&2 + echo " apt install node-sshpk" >&2 + exit 1 + } + sshpk-conv -T pem -t ssh -f "$pem" -o "$pub" -c "$name" + } +} + +ensureEd25519Key test-operator +ensureEd25519Key test-operator2 diff --git a/payment-test/scripts/run-as-workspace-user.sh b/payment-test/scripts/run-as-workspace-user.sh new file mode 100755 index 000000000..b522f32a3 --- /dev/null +++ b/payment-test/scripts/run-as-workspace-user.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +# Helper to sync the container user with the id of the owner of /workspace and +# then run the command as that user. + +set -eu + +workspace_uid_gid="$(stat -c %u:%g /workspace)" +uid="${workspace_uid_gid%:*}" +gid="${workspace_uid_gid#*:}" + +h="/workspace/payment-test/container-home" + +if ! test -h /home/user; then + # The user home is not a symlink, so the user in the container has not yet + # been adjusted. + groupmod -g "$gid" user + usermod -u "$uid" user + + # Updating files inside /workspace may race with other containers. So to + # copy the home skeleton, first copy it to a temporary location and then + # move it atomically. + if ! test -d "$h"; then + echo "Creating $h" >&2 + chown -R user:user /home/user + tmp="$(mktemp -u -p "${h%/*}")" + cp -a /home/user "$tmp" || { rm -rf "$tmp"; exit 1; } + mv "$tmp" "$h" || { rm -rf "$tmp"; exit 1; } + fi + rm -rf /home/user + ln -s "$h" /home/user +fi + +# We want to keep the current environment for the subprocess, so we do not use +# --reset-env with setpriv. Rather we just fix up a few variables using env. +exec setpriv --init-groups --regid "$gid" --reuid "$uid" --no-new-privs \ + env HOME="$h" SHELL=/usr/bin/bash USER=user LOGNAME=user \ + PATH=/usr/local/bin:/usr/bin "$@" diff --git a/payment-test/scripts/run-until-rebuild.sh b/payment-test/scripts/run-until-rebuild.sh new file mode 100755 index 000000000..45c95be08 --- /dev/null +++ b/payment-test/scripts/run-until-rebuild.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# Helper to run a command with its arguments until the timestamp of its +# executable changes.
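+# +# Example invocation (as used by payment-test/docker-compose.yml): +#   run-until-rebuild.sh /workspace/build/bat-go serve payments worker +# A background monitor polls the executable's modification time once per +# second and kills this process when it changes; the container restart policy +# then relaunches the command with the rebuilt binary.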
+ +set -eu + +target="$1" + +test -x "$1" || { + echo "The argument $1 is not an executable" >&2 + exit 1 +} + +monitor() { + local modification_time t + modification_time="$(stat -c "%Y" "$target")" + while :; do + sleep 1 + t="$(stat -c "%Y" "$target")" + if test "$t" -ne "$modification_time"; then + echo "Newer $target detected, restarting" >&2 + kill "$$" + sleep 0.5 + kill -9 "$$" + fi + done +} + +monitor & + +bar="======================================================================" +t="$(date '+%Y-%m-%d %H:%M:%S')" +printf '%s\n[%s] Running\n[%s] %s\n%s\n' "$bar" "$t" "$t" "$*" "$bar" >&2 + +exec "$@" diff --git a/payment-test/scripts/start-shell.sh b/payment-test/scripts/start-shell.sh new file mode 100755 index 000000000..dc29cb0d1 --- /dev/null +++ b/payment-test/scripts/start-shell.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +set -eu + +self="$(realpath "$0")" +scripts="${self%/*}" +repo="${scripts%/*/*}" + +"$scripts/ensure-secretes.sh" + +cd "$repo/tools/payments" + +exec ipython3 --profile-dir=ipython-profile diff --git a/services/cmd/serve.go b/services/cmd/serve.go index 9007f7d4f..7f4760b5d 100644 --- a/services/cmd/serve.go +++ b/services/cmd/serve.go @@ -28,9 +28,12 @@ func init() { // address - sets the address of the server to be started ServeCmd.PersistentFlags().String("address", ":8080", - "the default address to bind to") + "the address for the HTTP server to bind to") + ServeCmd.PersistentFlags().String("address2", ":8443", + "the address for the HTTPS server to bind to") cmdutils.Must(viper.BindPFlag("address", ServeCmd.PersistentFlags().Lookup("address"))) cmdutils.Must(viper.BindEnv("address", "ADDR")) + cmdutils.Must(viper.BindEnv("address2", "ADDR2")) ServeCmd.PersistentFlags().Bool("enable-job-workers", true, "enable job workers (defaults true)") @@ -81,6 +84,7 @@ func SetupRouter(ctx context.Context) *chi.Mux { Str("build_time", ctx.Value(appctx.BuildTimeCTXKey).(string)). Str("ratios_service", viper.GetString("ratios-service")). Str("address", viper.GetString("address")). + Str("address2", viper.GetString("address2")). Str("environment", viper.GetString("environment")). Msg("server starting") } diff --git a/services/nitro/nitro.go b/services/nitro/nitro.go index 05e5f2296..5b730dcb8 100644 --- a/services/nitro/nitro.go +++ b/services/nitro/nitro.go @@ -2,8 +2,15 @@ package nitro import ( "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" "fmt" + "math/big" "net/http" + "os" "runtime/debug" "strconv" "strings" @@ -17,7 +24,6 @@ import ( "github.com/brave-intl/bat-go/services/payments" "github.com/go-chi/chi" - "github.com/mdlayher/vsock" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -111,9 +117,9 @@ func RunNitroServerInEnclave(cmd *cobra.Command, args []string) error { ctx := cmd.Context() logaddr := viper.GetString("log-address") - writer := nitro.NewVsockWriter(logaddr) + logWriter := nitro.NewVsockWriter(logaddr) - ctx = context.WithValue(ctx, appctx.LogWriterCTXKey, writer) + ctx = context.WithValue(ctx, appctx.LogWriterCTXKey, logWriter) ctx = context.WithValue(ctx, appctx.EgressProxyAddrCTXKey, viper.GetString("egress-address")) ctx = context.WithValue(ctx, appctx.AWSRegionCTXKey, viper.GetString("aws-region")) ctx = context.WithValue(ctx, appctx.PaymentsQLDBRoleArnCTXKey, viper.GetString("qldb-role-arn")) @@ -136,6 +142,7 @@ func RunNitroServerInEnclave(cmd *cobra.Command, args []string) error { Str("panic", fmt.Sprintf("%+v", rec)). Str("stacktrace", string(debug.Stack())). 
Msg("panic recovered") + os.Exit(2) } }() @@ -152,31 +159,85 @@ func RunNitroServerInEnclave(cmd *cobra.Command, args []string) error { logger.Info().Msg("payments routes setup") // setup listener - addr := viper.GetString("address") - port, err := strconv.ParseUint(strings.Split(addr, ":")[1], 10, 32) - if err != nil || port == 0 { - // panic if there is an error, or if the port is too large to fit in uint32 - logger.Panic().Err(err).Msg("invalid --address") - } + httpListenAddress := viper.GetString("address") + + httpsListenAddress := viper.GetString("address2") // setup vsock listener - l, err := vsock.Listen(uint32(port), &vsock.Config{}) + httpListener, err := nitro.Listen(ctx, httpListenAddress) + if err != nil { + logger.Fatal().Err(err).Msg("failed to listen to HTTP port") + } + httpsListener, err := nitro.Listen(ctx, httpsListenAddress) if err != nil { - logger.Panic().Err(err).Msg("listening on vsock port failed") + logger.Fatal().Err(err).Msg("failed to listen to HTTPS port") } logger.Info().Msg("vsock listener setup") + + tlsCertificate, err := createSelfSignedCertificate() + if err != nil { + logger.Fatal().Err(err).Msg("failed to create a self-signed certificate") + } + // setup server srv := http.Server{ Handler: chi.ServerBaseContext(ctx, r), ReadTimeout: 3 * time.Second, WriteTimeout: 20 * time.Second, + TLSConfig: &tls.Config{}, } - logger.Info().Msg("starting server") - // run the server in another routine - logger.Fatal().Err(srv.Serve(l)).Msg("server shutdown") + srv.TLSConfig.Certificates = []tls.Certificate{tlsCertificate} + + logger.Info().Msgf( + "starting server on %s and %s", httpListenAddress, httpsListenAddress) + + errChan := make(chan error, 2) + go func() { + errChan <- srv.Serve(httpListener) + }() + go func() { + errChan <- srv.ServeTLS(httpsListener, "", "") + }() + err = <-errChan + logger.Fatal().Err(err).Msg("server shutdown") return nil } +func createSelfSignedCertificate() (tls.Certificate, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return tls.Certificate{}, err + } + keyUsage := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment + timeNow := time.Now() + + // Allow the client clock to be off by 1 min + notBefore := timeNow.Add(-time.Minute) + notAfter := timeNow.AddDate(10, 0, 0) + serialNumber := big.NewInt(1) + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Brave Software"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: keyUsage, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + template.DNSNames = []string{"nitro.localdomain"} + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, privateKey.Public(), privateKey) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{derBytes}, + PrivateKey: privateKey, + }, nil +} + // RunNitroServerOutsideEnclave - start up all the services which are outside func RunNitroServerOutsideEnclave(cmd *cobra.Command, args []string) error { ctx := cmd.Context() diff --git a/services/payments/cmd/worker.go b/services/payments/cmd/worker.go index 1f2dc659f..c21e1a23d 100644 --- a/services/payments/cmd/worker.go +++ b/services/payments/cmd/worker.go @@ -3,6 +3,7 @@ package cmd import ( rootcmd "github.com/brave-intl/bat-go/cmd" appctx "github.com/brave-intl/bat-go/libs/context" + "github.com/brave-intl/bat-go/libs/nitro" "github.com/brave-intl/bat-go/libs/redisconsumer" 
"github.com/brave-intl/bat-go/services/payments" "github.com/spf13/cobra" @@ -20,7 +21,11 @@ func WorkerRun(command *cobra.Command, args []string) { user := viper.GetString("redis-user") pass := viper.GetString("redis-pass") - redisClient, err := redisconsumer.NewStreamClient(ctx, env, addr, user, pass, true) + redisUseTLS := true + if nitro.EnclaveMocking() { + redisUseTLS = false + } + redisClient, err := redisconsumer.NewStreamClient(ctx, env, addr, user, pass, redisUseTLS) if err != nil { logger.Error().Err(err).Msg("failed to start redis consumer") return diff --git a/services/payments/controllers.go b/services/payments/controllers.go index 69a1dfda2..2d0039290 100644 --- a/services/payments/controllers.go +++ b/services/payments/controllers.go @@ -63,8 +63,10 @@ func SetupRouter(ctx context.Context, s *Service) (context.Context, *chi.Mux) { r.Route("/v1/payments", func(r chi.Router) { // Set date header with current date r.Use(middleware.SetResponseDate()) - // Sign all payments responses - r.Use(middleware.SignResponse(ps)) + if !nitro.EnclaveMocking() { + // Sign all payments responses + r.Use(middleware.SignResponse(ps)) + } // Log all payments requests r.Use(middleware.RequestLogger(logger)) diff --git a/services/payments/datastore.go b/services/payments/datastore.go index 213fd984e..3985ec0fd 100644 --- a/services/payments/datastore.go +++ b/services/payments/datastore.go @@ -544,6 +544,15 @@ func newQLDBDatastore(ctx context.Context) (*QLDBDatastore, error) { creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(awsCfg), qldbRoleArn) awsCfg.Credentials = aws.NewCredentialsCache(creds) + if false { + // This may be necessary when connecting from the local dev environment + // to QLDB. + _, err := awsCfg.Credentials.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("failed to retrieve credentials for QLDB: %w", err) + } + } + client := qldbsession.NewFromConfig(awsCfg) // create our qldb driver driver, err := qldbdriver.New( diff --git a/services/payments/operators.go b/services/payments/operators.go index 88a7f4391..b2cb7a4ef 100644 --- a/services/payments/operators.go +++ b/services/payments/operators.go @@ -1,6 +1,10 @@ package payments -import "os" +import ( + "os" + + "github.com/brave-intl/bat-go/libs/nitro" +) const ( jegan = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDfcr9jUEu9D9lSpUnPwT1cCggCe48kZw1bJt+CXYSnh jegan+settlements@brave.com" @@ -12,6 +16,12 @@ const ( // vaultManagerKeys returns the set of keys permitted to interact with the secrets vault. func vaultManagerKeys() []string { + if nitro.EnclaveMocking() { + return []string{ + nitro.ReadMockingSecretsFile("test-operator.pub"), + nitro.ReadMockingSecretsFile("test-operator2.pub"), + } + } switch os.Getenv("ENV") { case "staging": return []string{jegan, evq} diff --git a/services/payments/secrets.go b/services/payments/secrets.go index 444e24587..0ddea75ff 100644 --- a/services/payments/secrets.go +++ b/services/payments/secrets.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "io" + "os" "strings" "time" @@ -250,7 +251,14 @@ func (s *Service) createSolanaAddress(ctx context.Context, bucket, creatorKey st if err != nil { return nil, fmt.Errorf("failed to create aws config: %w", err) } - s3Client := s3.NewFromConfig(awsCfg) + + var s3OptFns []func(*s3.Options) + if nitro.EnclaveMocking() { + s3OptFns = append(s3OptFns, func(o *s3.Options) { + o.BaseEndpoint = aws.String("http://localstack:4566") + }) + } + s3Client := s3.NewFromConfig(awsCfg, s3OptFns...) 
h := md5.New() h.Write(encBuf.Bytes()) @@ -562,6 +570,26 @@ func (u *Unsealing) decryptSecrets(ctx context.Context) error { return nil } +func (u *Unsealing) readTestSecretes() error { + envName := "BAT_PAYMENT_TEST_SECRETS" + secretsPath := os.Getenv(envName) + if secretsPath == "" { + return fmt.Errorf("The environment variable %s is not set", envName) + } + f, err := os.Open(secretsPath) + if err != nil { + return fmt.Errorf("Failed to open the test secrets from %s - %w", envName, err) + } + + output := map[string]string{} + if err := json.NewDecoder(f).Decode(&output); err != nil { + return fmt.Errorf( + "failed to json decode the test secretes %s: %w", secretsPath, err) + } + u.secrets = output + return nil +} + func getDecryptReader( ctx context.Context, key OperatorKey, diff --git a/services/payments/service.go b/services/payments/service.go index 8373c8c36..4882dc3da 100644 --- a/services/payments/service.go +++ b/services/payments/service.go @@ -221,29 +221,32 @@ func NewService(ctx context.Context) (context.Context, *Service, error) { return nil, nil, errors.New("no egress addr for payments service") } - pcrs, err := nitro.GetPCRs() - if err != nil { - logger.Fatal().Err(err).Msg("could not retrieve nitro PCRs") - return nil, nil, errors.New("could not retrieve nitro PCRs") - } - store, err := NewVerifierStore() - if err != nil { - logger.Fatal().Err(err).Msg("could not create verifier store") - return nil, nil, errors.New("could not create verifier store") - } - service := &Service{ - baseCtx: ctx, - awsCfg: awsCfg, - publicKey: hex.EncodeToString(pcrs[2]), - signer: nitro.Signer{}, - verifierStore: store, - egressAddr: egressAddr, + baseCtx: ctx, + awsCfg: awsCfg, + signer: nitro.Signer{}, + egressAddr: egressAddr, } - // create the kms encryption key for this service for bootstrap operator shares - if err := service.configureKMSEncryptionKey(ctx); err != nil { - return nil, nil, fmt.Errorf("could not create kms secret encryption key: %w", err) + if !nitro.EnclaveMocking() { + pcrs, err := nitro.GetPCRs() + if err != nil { + logger.Fatal().Err(err).Msg("could not retrieve nitro PCRs") + return nil, nil, errors.New("could not retrieve nitro PCRs") + } + service.publicKey = hex.EncodeToString(pcrs[2]) + + verifierStore, err := NewVerifierStore() + if err != nil { + logger.Fatal().Err(err).Msg("could not create verifier store") + return nil, nil, errors.New("could not create verifier store") + } + service.verifierStore = verifierStore + + // create the kms encryption key for this service for bootstrap operator shares + if err := service.configureKMSEncryptionKey(ctx); err != nil { + return nil, nil, fmt.Errorf("could not create kms secret encryption key: %w", err) + } } service.datastore, err = configureDatastore(ctx) @@ -272,20 +275,26 @@ func (s *Service) unsealConfig( getChainAddress: s.datastore.GetChainAddress, } - operatorErrorCh := make(chan error, 1) - go func() { - operatorErrorCh <- unsealing.fetchOperatorShares(ctx, logger) - }() - - err := unsealing.fetchSecretes(ctx, logger) - err2 := <-operatorErrorCh - if err != nil || err2 != nil { - return errors.Join(err, err2) - } + if nitro.EnclaveMocking() { + if err := unsealing.readTestSecretes(); err != nil { + return err + } + } else { + operatorErrorCh := make(chan error, 1) + go func() { + operatorErrorCh <- unsealing.fetchOperatorShares(ctx, logger) + }() + + err := unsealing.fetchSecretes(ctx, logger) + err2 := <-operatorErrorCh + if err != nil || err2 != nil { + return errors.Join(err, err2) + } - err = 
unsealing.decryptSecrets(ctx) - if err != nil { - return err + err = unsealing.decryptSecrets(ctx) + if err != nil { + return err + } } logger.Debug().Msg("decrypted secrets without error") diff --git a/services/payments/statemachine.go b/services/payments/statemachine.go index 293e59330..122cb9e96 100644 --- a/services/payments/statemachine.go +++ b/services/payments/statemachine.go @@ -98,7 +98,7 @@ func (s *Service) StateMachineFromTransaction( var machine TxStateMachine client := http.Client{ - Transport: nitro.NewProxyRoundTripper(ctx, s.egressAddr).(*http.Transport), + Transport: nitro.NewProxyTransport(ctx, s.egressAddr), } switch authenticatedState.PaymentDetails.Custodian { diff --git a/tools/payments/client.go b/tools/payments/client.go index d75498404..58b80837b 100644 --- a/tools/payments/client.go +++ b/tools/payments/client.go @@ -50,6 +50,10 @@ type SettlementClient interface { func NewSettlementClient(ctx context.Context, env string, config map[string]string) (context.Context, SettlementClient, error) { ctx, _ = logging.SetupLogger(ctx) + if nitro.EnclaveMocking() { + + } + var sp httpsignature.SignatureParams sp.Algorithm = httpsignature.AWSNITRO sp.KeyID = "primary" diff --git a/tools/payments/ipython-profile/ipython_config.py b/tools/payments/ipython-profile/ipython_config.py index ede5ddf4a..7606dfa3e 100644 --- a/tools/payments/ipython-profile/ipython_config.py +++ b/tools/payments/ipython-profile/ipython_config.py @@ -5,13 +5,13 @@ # InteractiveShellApp(Configurable) configuration #------------------------------------------------------------------------------ ## A Mixin for applications that start InteractiveShell instances. -# +# # Provides configurables for loading extensions and executing files # as part of configuring a Shell environment. 
-# +# # The following methods should be called by the :meth:`initialize` method # of the subclass: -# +# # - :meth:`init_path` # - :meth:`init_shell` (to be implemented by the subclass) # - :meth:`init_gui_pylab` @@ -37,13 +37,13 @@ "%alias status dist/status -e $$environment -p $$payout_id -rp $$redis_password -ru $$redis_username", "%alias authorize dist/authorize -e $$environment -p $$payout_id -k $$operator_key -ru $$redis_username -rp $$redis_password -pr $$payout_report -pcr2 $$pcr2 $$prepare_log", "%alias bootstrap dist/bootstrap -b $$secrets_s3_bucket -v -e $$environment -p $$operator_key -pcr2 $$pcr2 %s", -"%alias prepare dist/prepare -e $$environment -p $$payout_id -ru $$redis_username -rp $$redis_password -pcr2 $$pcr2 $$payout_report", +"%alias prepare dist/prepare -e $$environment -p $$payout_id -k $$operator_key -ru $$redis_username -rp $$redis_password -pcr2 $$pcr2 $$payout_report", "%alias report dist/report -e $$environment -p $$payout_id -ru $$redis_username -rp $$redis_password -pcr2 $$pcr2 $$payout_report", "%alias info dist/info -e $$environment -pcr2 $$pcr2 $$payout_report", "%alias await_prepare touch $$prepare_log && dist/prepare -cg cli2 -e $$environment -p $$payout_id -ru $$redis_username -rp $$redis_password -pcr2 $$pcr2 $$payout_report", -"%alias redis redis-cli -p 6380 --user $$redis_username --pass $$redis_password", -"%alias flush_redis redis-cli -p 6380 --user $$redis_username --pass $$redis_password del \*$$payout_id\*", -"%alias list_payouts redis-cli -p 6380 --user $$redis_username --pass $$redis_password keys \*", +"%alias redis redis-cli -p 6380 --user $$redis_username", +"%alias flush_redis redis-cli -p 6380 --user $$redis_username del \*$$payout_id\*", +"%alias list_payouts redis-cli -p 6380 --user $$redis_username keys \*", "%alias worker_logs AWS_VAULT= kubectl --context $$cluster --namespace payment-$$environment logs deployments/worker", "%alias enclave_logs AWS_VAULT= kubectl --context $$cluster --namespace payment-$$environment logs deployments/web -c nitro-utils", "%alias pods AWS_VAULT= kubectl --context $$cluster --namespace payment-$$environment get pods", @@ -58,9 +58,9 @@ # c.InteractiveShellApp.extensions = [] ## Dotted module name(s) of one or more IPython extensions to load. -# +# # For specifying extra extensions to load on the command-line. -# +# # .. versionadded:: 7.10 # Default: [] # c.InteractiveShellApp.extra_extensions = [] @@ -105,7 +105,7 @@ ## If true, IPython will populate the user namespace with numpy, pylab, etc. # and an ``import *`` is done from numpy and pylab, when using pylab mode. -# +# # When False, pylab mode should not import any names into the user # namespace. # Default: True @@ -134,32 +134,32 @@ # c.Application.log_level = 30 ## Configure additional log handlers. -# +# # The default stderr logs handler is configured by the log_level, log_datefmt # and log_format settings. -# +# # This configuration can be used to configure additional handlers (e.g. to # output the log to a file) or for finer control over the default handlers. -# +# # If provided this should be a logging configuration dictionary, for more # information see: # https://docs.python.org/3/library/logging.config.html#logging-config- # dictschema -# +# # This dictionary is merged with the base logging configuration which defines # the following: -# +# # * A logging formatter intended for interactive use called # ``console``. # * A logging handler that writes to stderr called # ``console`` which uses the formatter ``console``. 
# * A logger with the name of this application set to ``DEBUG`` # level. -# +# # This example adds a new handler that writes to a file: -# +# # .. code-block:: python -# +# # c.Application.logging_config = { # "handlers": { # "file": { @@ -206,7 +206,7 @@ # c.BaseIPythonApplication.copy_config_files = False ## Path to an extra config file to load. -# +# # If specified, load this config file in addition to any other IPython # config. # Default: '' @@ -231,7 +231,7 @@ # See also: Application.log_level # c.BaseIPythonApplication.log_level = 30 -## +## # See also: Application.logging_config # c.BaseIPythonApplication.logging_config = {} @@ -295,7 +295,7 @@ # See also: BaseIPythonApplication.extra_config_file # c.TerminalIPythonApp.extra_config_file = '' -## +## # See also: InteractiveShellApp.extra_extensions # c.TerminalIPythonApp.extra_extensions = [] @@ -328,7 +328,7 @@ # Default: 'IPython.terminal.interactiveshell.TerminalInteractiveShell' # c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell' -## +## # See also: BaseIPythonApplication.ipython_dir # c.TerminalIPythonApp.ipython_dir = '' @@ -344,7 +344,7 @@ # See also: Application.log_level # c.TerminalIPythonApp.log_level = 30 -## +## # See also: Application.logging_config # c.TerminalIPythonApp.logging_config = {} @@ -554,11 +554,11 @@ #------------------------------------------------------------------------------ # TerminalInteractiveShell(InteractiveShell) configuration #------------------------------------------------------------------------------ -## +## # See also: InteractiveShell.ast_node_interactivity # c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr' -## +## # See also: InteractiveShell.ast_transformers # c.TerminalInteractiveShell.ast_transformers = [] @@ -567,11 +567,11 @@ # Default: False # c.TerminalInteractiveShell.auto_match = False -## +## # See also: InteractiveShell.autoawait # c.TerminalInteractiveShell.autoawait = True -## +## # See also: InteractiveShell.autocall # c.TerminalInteractiveShell.autocall = 0 @@ -579,11 +579,11 @@ # Default: None # c.TerminalInteractiveShell.autoformatter = None -## +## # See also: InteractiveShell.autoindent # c.TerminalInteractiveShell.autoindent = True -## +## # See also: InteractiveShell.automagic # c.TerminalInteractiveShell.automagic = True @@ -602,11 +602,11 @@ # See also: InteractiveShell.banner2 # c.TerminalInteractiveShell.banner2 = '' -## +## # See also: InteractiveShell.cache_size # c.TerminalInteractiveShell.cache_size = 1000 -## +## # See also: InteractiveShell.color_info # c.TerminalInteractiveShell.color_info = True @@ -658,7 +658,7 @@ # Default: True # c.TerminalInteractiveShell.enable_history_search = True -## +## # See also: InteractiveShell.enable_html_pager # c.TerminalInteractiveShell.enable_html_pager = False @@ -690,7 +690,7 @@ # See also: InteractiveShell.history_length # c.TerminalInteractiveShell.history_length = 10000 -## +## # See also: InteractiveShell.history_load_length # c.TerminalInteractiveShell.history_load_length = 1000 @@ -701,15 +701,15 @@ # See also: InteractiveShell.ipython_dir # c.TerminalInteractiveShell.ipython_dir = '' -## +## # See also: InteractiveShell.logappend # c.TerminalInteractiveShell.logappend = '' -## +## # See also: InteractiveShell.logfile # c.TerminalInteractiveShell.logfile = '' -## +## # See also: InteractiveShell.logstart # c.TerminalInteractiveShell.logstart = False @@ -734,7 +734,7 @@ # See also: InteractiveShell.object_info_string_level # 
c.TerminalInteractiveShell.object_info_string_level = 0 -## +## # See also: InteractiveShell.pdb # c.TerminalInteractiveShell.pdb = False @@ -770,20 +770,20 @@ # c.TerminalInteractiveShell.separate_out2 = '' ## Add, disable or modifying shortcuts. -# +# # Each entry on the list should be a dictionary with ``command`` key # identifying the target function executed by the shortcut and at least # one of the following: -# +# # - ``match_keys``: list of keys used to match an existing shortcut, # - ``match_filter``: shortcut filter used to match an existing shortcut, # - ``new_keys``: list of keys to set, # - ``new_filter``: a new shortcut filter to set -# +# # The filters have to be composed of pre-defined verbs and joined by one # of the following conjunctions: ``&`` (and), ``|`` (or), ``~`` (not). # The pre-defined verbs are: -# +# # - `always` # - `never` # - `has_line_below` @@ -825,16 +825,16 @@ # - `navigable_suggestions` # - `cursor_in_leading_ws` # - `pass_through` -# +# # To disable a shortcut set ``new_keys`` to an empty list. # To add a shortcut add key ``create`` with value ``True``. -# +# # When modifying/disabling shortcuts, ``match_keys``/``match_filter`` can # be omitted if the provided specification uniquely identifies a shortcut # to be modified/disabled. When modifying a shortcut ``new_filter`` or # ``new_keys`` can be omitted which will result in reuse of the existing # filter/keys. -# +# # Only shortcuts defined in IPython (and not default prompt-toolkit # shortcuts) can be modified or disabled. The full list of shortcuts, # command identifiers and filters is available under @@ -847,10 +847,10 @@ # c.TerminalInteractiveShell.show_rewritten_input = True ## Use `raw_input` for the REPL, without completion and prompt colors. -# +# # Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usage are: # IPython own testing machinery, and emacs inferior-shell integration through elpy. -# +# # This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` # environment variable is set, or the current terminal is not a tty. # Default: False @@ -863,7 +863,7 @@ # Default: 6 # c.TerminalInteractiveShell.space_for_menu = 6 -## +## # See also: InteractiveShell.sphinxify_docstring # c.TerminalInteractiveShell.sphinxify_docstring = False @@ -884,7 +884,7 @@ ## Use 24bit colors instead of 256 colors in prompt highlighting. # If your terminal supports true color, the following command should # print ``TRUECOLOR`` in orange:: -# +# # printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n" # Default: False # c.TerminalInteractiveShell.true_color = False @@ -910,19 +910,19 @@ # HistoryAccessor(HistoryAccessorBase) configuration #------------------------------------------------------------------------------ ## Access the history database without adding to it. -# +# # This is intended for use by standalone history tools. IPython shells use # HistoryManager, below, which is a subclass of this. ## Options for configuring the SQLite connection -# +# # These options are passed as keyword args to sqlite3.connect # when establishing database connections. # Default: {} # c.HistoryAccessor.connection_options = {} ## enable the SQLite history -# +# # set enabled=False to disable the SQLite history, # in which case there will be no stored history, no SQLite connection, # and no background saving thread. This may be necessary in some @@ -931,17 +931,17 @@ # c.HistoryAccessor.enabled = True ## Path to file to use for SQLite history database. 
-# +# # By default, IPython will put the history database in the IPython # profile directory. If you would rather share one history among # profiles, you can set this value in each, so that they are consistent. -# +# # Due to an issue with fcntl, SQLite is known to misbehave on some NFS # mounts. If you see IPython hanging, try setting this to something on a # local disk, e.g:: -# +# # ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite -# +# # you can also use the specific value `:memory:` (including the colon # at both end but not the back ticks), to avoid creating an history file. # Default: traitlets.Undefined @@ -983,24 +983,24 @@ # c.MagicsManager.auto_magic = True ## Mapping from magic names to modules to load. -# +# # This can be used in IPython/IPykernel configuration to declare lazy magics # that will only be imported/registered on first use. -# +# # For example:: -# +# # c.MagicsManager.lazy_magics = { # "my_magic": "slow.to.import", # "my_other_magic": "also.slow", # } -# +# # On first invocation of `%my_magic`, `%%my_magic`, `%%my_other_magic` or # `%%my_other_magic`, the corresponding module will be loaded as an ipython # extensions as if you had previously done `%load_ext ipython`. -# +# # Magics names should be without percent(s) as magics can be both cell and line # magics. -# +# # Lazy loading happen relatively late in execution process, and complex # extensions that manipulate Python/IPython internal state or global state might # not support lazy loading. @@ -1011,10 +1011,10 @@ # ProfileDir(LoggingConfigurable) configuration #------------------------------------------------------------------------------ ## An object to manage the profile directory and its resources. -# +# # The profile directory is used by all IPython applications, to manage # configuration, logging and security. -# +# # This object knows how to find, create and manage these directories. This # should be used by any code that wants to handle profiles. @@ -1027,25 +1027,25 @@ # BaseFormatter(Configurable) configuration #------------------------------------------------------------------------------ ## A base formatter class that is configurable. -# +# # This formatter should usually be used as the base class of all formatters. # It is a traited :class:`Configurable` class and includes an extensible # API for users to determine how their objects are formatted. The following # logic is used to find a function to format an given object. -# +# # 1. The object is introspected to see if it has a method with the name # :attr:`print_method`. If is does, that object is passed to that method # for formatting. # 2. If no print method is found, three internal dictionaries are consulted # to find print method: :attr:`singleton_printers`, :attr:`type_printers` # and :attr:`deferred_printers`. -# +# # Users should use these dictionaries to register functions that will be # used to compute the format data for their objects (if those objects don't # have the special print methods). The easiest way of using these # dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name` # methods. -# +# # If no function/callable is found to compute the format data, ``None`` is # returned and this format type is not used. @@ -1065,12 +1065,12 @@ # PlainTextFormatter(BaseFormatter) configuration #------------------------------------------------------------------------------ ## The default pretty-printer. -# +# # This uses :mod:`IPython.lib.pretty` to compute the format data of # the object. 
If the object cannot be pretty printed, :func:`repr` is used. # See the documentation of :mod:`IPython.lib.pretty` for details on # how to write pretty printers. Here is a simple example:: -# +# # def dtype_pprinter(obj, p, cycle): # if cycle: # return p.text('dtype(...)') @@ -1093,7 +1093,7 @@ # c.PlainTextFormatter.float_precision = '' ## Truncate large collections (lists, dicts, tuples, sets) to this size. -# +# # Set to 0 to disable truncation. # Default: 1000 # c.PlainTextFormatter.max_seq_length = 1000 @@ -1120,7 +1120,7 @@ # Completer(Configurable) configuration #------------------------------------------------------------------------------ ## Enable auto-closing dictionary keys. -# +# # When enabled string keys will be suffixed with a final quote (matching the # opening quote), tuple keys will also receive a separating comma if needed, and # keys which are final will receive a closing bracket (``]``). @@ -1139,15 +1139,15 @@ # c.Completer.debug = False ## Policy for code evaluation under completion. -# +# # Successive options allow to enable more eager evaluation for better # completion suggestions, including for nested dictionaries, nested lists, # or even results of function calls. # Setting ``unsafe`` or higher can lead to evaluation of arbitrary user # code on :kbd:`Tab` with potentially unwanted or dangerous side effects. -# +# # Allowed values are: -# +# # - ``forbidden``: no evaluation of code is permitted, # - ``minimal``: evaluation of literals and access to built-in namespace; # no item/attribute evaluationm no access to locals/globals, @@ -1164,13 +1164,13 @@ # c.Completer.evaluation = 'limited' ## Activate greedy completion. -# +# # .. deprecated:: 8.8 # Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead. -# +# # When enabled in IPython 8.8 or newer, changes configuration as # follows: -# +# # - ``Completer.evaluation = 'unsafe'`` # - ``Completer.auto_close_dict_keys = True`` # Default: False @@ -1192,7 +1192,7 @@ #------------------------------------------------------------------------------ ## Extension of the completer class with IPython-specific features -## +## # See also: Completer.auto_close_dict_keys # c.IPCompleter.auto_close_dict_keys = False @@ -1208,7 +1208,7 @@ # c.IPCompleter.debug = False ## List of matchers to disable. -# +# # The list should contain matcher identifiers (see # :any:`completion_matcher`). # Default: [] @@ -1228,35 +1228,35 @@ # c.IPCompleter.jedi_compute_type_timeout = 400 ## DEPRECATED as of version 5.0. -# +# # Instruct the completer to use __all__ for the completion -# +# # Specifically, when completing on ``object.``. -# +# # When True: only those names in obj.__all__ will be included. -# +# # When False [default]: the __all__ attribute is ignored # Default: False # c.IPCompleter.limit_to__all__ = False ## Whether to merge completion results into a single list -# +# # If False, only the completion results from the first non-empty # completer will be returned. -# +# # As of version 8.6.0, setting the value to ``False`` is an alias for: # ``IPCompleter.suppress_competing_matchers = True.``. # Default: True # c.IPCompleter.merge_completions = True ## Instruct the completer to omit private method names -# +# # Specifically, when completing on ``object.``. -# +# # When 2 [default]: all names that start with '_' will be excluded. -# +# # When 1: all 'magic' names (``__foo__``) will be excluded. -# +# # When 0: nothing will be excluded. 
# Choices: any of [0, 1, 2] # Default: 2 @@ -1271,19 +1271,19 @@ # c.IPCompleter.profiler_output_dir = '.completion_profiles' ## Whether to suppress completions from other *Matchers*. -# +# # When set to ``None`` (default) the matchers will attempt to auto-detect # whether suppression of other matchers is desirable. For example, at the # beginning of a line followed by `%` we expect a magic completion to be the # only applicable option, and after ``my_dict['`` we usually expect a completion # with an existing dictionary key. -# +# # If you want to disable this heuristic and see completions from all matchers, # set ``IPCompleter.suppress_competing_matchers = False``. To disable the # heuristic for specific matchers provide a dictionary mapping: # ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': # False}``. -# +# # Set ``IPCompleter.suppress_competing_matchers = True`` to limit completions to # the set of matchers with the highest priority; this is equivalent to # ``IPCompleter.merge_completions`` and can be beneficial for performance, but @@ -1301,22 +1301,22 @@ # ScriptMagics(Magics) configuration #------------------------------------------------------------------------------ ## Magics for talking to scripts -# +# # This defines a base `%%script` cell magic for running a cell # with a program in a subprocess, and registers a few top-level # magics that call %%script with common interpreters. ## Extra script cell magics to define -# +# # This generates simple wrappers of `%%script foo` as `%%foo`. -# +# # If you want to add script magics that aren't on your path, # specify them in script_paths # Default: [] # c.ScriptMagics.script_magics = [] ## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' -# +# # Only necessary for items in script_magics where the default path will not # find the right interpreter. # Default: {} @@ -1335,7 +1335,7 @@ # StoreMagics(Magics) configuration #------------------------------------------------------------------------------ ## Lightweight persistence for python variables. -# +# # Provides the %store magic. 
## If True, any %store-d variables will be automatically restored diff --git a/tools/payments/ipython-profile/startup/00-setup.py b/tools/payments/ipython-profile/startup/00-setup.py index 383e9eaf5..d253cdc14 100644 --- a/tools/payments/ipython-profile/startup/00-setup.py +++ b/tools/payments/ipython-profile/startup/00-setup.py @@ -26,6 +26,9 @@ environment = "dev" del aws_vault +if os.getenv("NITRO_ENCLAVE_MOCKING", ""): + environment = "local" + # FIXME TODO pull env vars and set during pcr verification secrets_s3_bucket = "" @@ -40,8 +43,7 @@ os.putenv("REDISCLI_AUTH", redis_password) if os.getenv("NITRO_ENCLAVE_MOCKING", ""): - pcr2 = "abc2" * (96 / 4) - operator_key = "../../payment-test/secrets/payment-test-operator.pem" + operator_key = os.getenv("BAT_OPERATOR_KEY") jobs = bg.BackgroundJobManager() @@ -81,6 +83,8 @@ def _set_redis_credentials(): def _get_web_env(): global redis_username, redis_password env = os.environ.copy() + if os.getenv("NITRO_ENCLAVE_MOCKING", ""): + return env if "AWS_VAULT" in env: del env["AWS_VAULT"] p = subprocess.Popen(["kubectl", "--context", cluster, "--namespace", f"payment-{environment}", "get", "deployments", "web", "-o", "json"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -101,6 +105,8 @@ def _set_secrets_s3_bucket(): secrets_s3_bucket = env["ENCLAVE_CONFIG_BUCKET_NAME"] def _get_pcr2(): + if os.getenv("NITRO_ENCLAVE_MOCKING", ""): + return "ced2" * 24 env = os.environ.copy() web_env = _get_web_env() env["EIF_COMMAND"] = web_env["EIF_COMMAND"]