
Commit: small improvements
tropnikovvl committed Oct 18, 2024
1 parent 07c6c65 commit 25b2c99
Showing 7 changed files with 76 additions and 108 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -2,7 +2,7 @@ FROM golang:1.23 AS builder

WORKDIR /build

COPY ./ /build
COPY . .

RUN CGO_ENABLED=0 go build -a

2 changes: 1 addition & 1 deletion README.md
@@ -19,7 +19,7 @@ Run from command-line - example with minimal parameter list:
Run as docker container - example for local s3-like buckets with ssl disabled:

```sh
docker run -p 9655:9655 -d -e LISTEN_PORT=:9655 -e S3_DISABLE_SSL=True -e S3_ENDPOINT=192.168.0.1:7480 -e S3_ACCESS_KEY=akces123 -e S3_SECRET_KEY=secret123 -e S3_BUCKET_NAME=my-bucket-name docker.io/tropnikovvl/s3bucket_exporter:1.1.0
docker run -p 9655:9655 -d -e LISTEN_PORT=:9655 -e S3_DISABLE_SSL=True -e S3_ENDPOINT=192.168.0.1:7480 -e S3_ACCESS_KEY=akces123 -e S3_SECRET_KEY=secret123 -e S3_BUCKET_NAME=my-bucket-name docker.io/tropnikovvl/s3bucket_exporter:1.2.0
```

Run from command-line - example for AWS
86 changes: 28 additions & 58 deletions controllers/s3talker.go
@@ -1,9 +1,7 @@
package controllers

import (
"encoding/json"
"errors"
"os"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -27,7 +25,7 @@ type S3Summary struct {
S3Status bool `json:"s3Status"`
S3Size float64 `json:"s3Size"`
S3ObjectNumber float64 `json:"s3ObjectNumber"`
S3Buckets Buckets `json:"s3Bucket"`
S3Buckets Buckets `json:"s3Buckets"`
}

// S3Conn struct - keeps information about remote S3
@@ -44,8 +42,7 @@ type S3Conn struct {

// S3UsageInfo - gets s3 connection details return s3Summary
func S3UsageInfo(s3Conn S3Conn, s3BucketName string) (S3Summary, error) {
summary := S3Summary{}

summary := S3Summary{S3Status: false}
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(s3Conn.S3ConnAccessKey, s3Conn.S3ConnSecretKey, ""),
Endpoint: aws.String(s3Conn.S3ConnEndpoint),
@@ -57,101 +54,74 @@ func S3UsageInfo(s3Conn S3Conn, s3BucketName string) (S3Summary, error) {

sess, err := session.NewSession(s3Config)
if err != nil {
log.Errorf("failed to create AWS session: %v", err)
log.Errorf("Failed to create AWS session: %v", err)
return summary, err
}

s3Client := s3.New(sess)

return checkBuckets(s3BucketName, s3Client, summary)

return fetchBucketData(s3BucketName, s3Client, summary)
}

func checkBuckets(s3BucketName string, s3Client *s3.S3, summary S3Summary) (S3Summary, error) {
var err error

func fetchBucketData(s3BucketName string, s3Client *s3.S3, summary S3Summary) (S3Summary, error) {
// checkSingleBucket - retrieves data for a specific bucket
if s3BucketName != "" {
summary, err = processBucket(s3BucketName, s3Client, summary)
if err != nil {
summary.S3Status = false
log.Errorf("Failed to get metrics for bucket %s: %v", s3BucketName, err)
} else {
summary.S3Status = true
}
return saveSummary(summary)
return processBucket(s3BucketName, s3Client, summary)
}

// checkAllBuckets - retrieves data for all available buckets
result, err := s3Client.ListBuckets(nil)
if err != nil {
log.Errorf("Connection to S3 endpoint failed: %v", err)
summary.S3Status = false
return summary, errors.New("s3 endpoint: unable to connect")
} else {
summary.S3Status = true
log.Errorf("Failed to list buckets: %v", err)
return summary, errors.New("unable to connect to S3 endpoint")
}

// Calculate data for each bucket
for _, b := range result.Buckets {
summary, err = processBucket(aws.StringValue(b.Name), s3Client, summary)
if err != nil {
log.Errorf("Failed to get metrics for bucket %s: %v", aws.StringValue(b.Name), err)
if summary, err = processBucket(aws.StringValue(b.Name), s3Client, summary); err != nil {
log.Errorf("Failed to process bucket %s: %v", aws.StringValue(b.Name), err)
continue
}
}

return saveSummary(summary)
return summary, nil
}

// processBucket retrieves size and object count metrics for a specific bucket.
func processBucket(bucketName string, s3Client *s3.S3, summary S3Summary) (S3Summary, error) {
size, number, err := countBucketSize(bucketName, s3Client)
size, count, err := calculateBucketMetrics(bucketName, s3Client)
if err != nil {
log.Errorf("failed to get metrics for bucket %s: %v", bucketName, err)
log.Errorf("Failed to get metrics for bucket %s: %v", bucketName, err)
return summary, err
}

bucket := Bucket{
BucketName: bucketName,
BucketSize: size,
BucketObjectNumber: number,
BucketObjectNumber: count,
}
summary.S3Buckets = append(summary.S3Buckets, bucket)
summary.S3Size += size
summary.S3ObjectNumber += number
summary.S3ObjectNumber += count
summary.S3Status = true

return summary, nil
}

// saveSummary - saves the S3 summary to a JSON file
func saveSummary(summary S3Summary) (S3Summary, error) {
byteArray, err := json.MarshalIndent(summary, "", " ")
if err != nil {
log.Errorf("failed to marshal S3 summary to JSON: %v", err)
return summary, err
}
if err := os.WriteFile("s3Information.json", byteArray, 0600); err != nil {
log.Errorf("failed to write S3 summary to file: %v", err)
return summary, err
}
return summary, nil
}

// countBucketSize - calculates the size and number of objects in a bucket
func countBucketSize(bucketName string, s3Client *s3.S3) (float64, float64, error) {
var bucketUsage, bucketObjects float64
// calculateBucketMetrics computes the total size and object count for a bucket.
func calculateBucketMetrics(bucketName string, s3Client *s3.S3) (float64, float64, error) {
var totalSize, objectCount float64

err := s3Client.ListObjectsV2Pages(&s3.ListObjectsV2Input{Bucket: aws.String(bucketName)},
func(p *s3.ListObjectsV2Output, _ bool) bool {
for _, obj := range p.Contents {
bucketUsage += float64(*obj.Size)
bucketObjects++
func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, obj := range page.Contents {
totalSize += float64(*obj.Size)
objectCount++
}
return true
return !lastPage
})

if err != nil {
log.Errorf("failed to list objects for bucket %s: %v", bucketName, err)
log.Errorf("Failed to list objects for bucket %s: %v", bucketName, err)
return 0, 0, err
}
return bucketUsage, bucketObjects, nil
return totalSize, objectCount, nil
}
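
Note on the refactor above: with saveSummary removed, the exporter no longer writes s3Information.json to disk (presumably why the helm chart's tmp volume and mount are commented out further down), and fetchBucketData now returns the summary directly. The sketch below is a minimal, hypothetical caller of the refactored entry point; the module import path and any S3Conn fields beyond those visible in this diff (endpoint, access key, secret key) are assumptions, not code from this commit.

```go
package main

import (
	"fmt"
	"log"

	// Assumed module path - adjust to the repository's actual go.mod module name.
	"github.com/tropnikovvl/s3bucket_exporter/controllers"
)

func main() {
	// Field names taken from the diff; region, SSL and path-style options are omitted here.
	conn := controllers.S3Conn{
		S3ConnEndpoint:  "192.168.0.1:7480",
		S3ConnAccessKey: "akces123",
		S3ConnSecretKey: "secret123",
	}

	// An empty bucket name lists and aggregates all buckets;
	// a non-empty name short-circuits to processBucket for that single bucket.
	summary, err := controllers.S3UsageInfo(conn, "")
	if err != nil {
		log.Fatalf("S3 usage collection failed: %v", err)
	}

	fmt.Printf("up=%v totalSize=%.0f objects=%.0f buckets=%d\n",
		summary.S3Status, summary.S3Size, summary.S3ObjectNumber, len(summary.S3Buckets))
}
```

On the pagination tweak in calculateBucketMetrics: returning `!lastPage` from the ListObjectsV2Pages callback stops iteration explicitly once the final page has been processed, while the old `return true` simply let the SDK stop when pages ran out; both variants walk every page.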
2 changes: 1 addition & 1 deletion docker-compose.yaml
@@ -1,7 +1,7 @@
version: "3.5"
services:
s3bucketexporter:
image: docker.io/tropnikovvl/s3bucket_exporter:1.1.0
image: docker.io/tropnikovvl/s3bucket_exporter:1.2.0
restart: always
ports:
- "9655:9655"
4 changes: 2 additions & 2 deletions helm/Chart.yaml
@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
version: 0.3.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.1.0"
appVersion: "1.2.0"
12 changes: 6 additions & 6 deletions helm/values.yaml
@@ -118,14 +118,14 @@ autoscaling:
# targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes:
- name: tmp
emptyDir: {}
volumes: {}
# - name: tmp
# emptyDir: {}

# Additional volumeMounts on the output Deployment definition.
volumeMounts:
- name: tmp
mountPath: /tmp
volumeMounts: {}
# - name: tmp
# mountPath: /tmp

nodeSelector: {}

76 changes: 37 additions & 39 deletions main.go
@@ -14,18 +14,19 @@ import (
)

var (
up = prometheus.NewDesc("s3_endpoint_up", "Connection to S3 successful", []string{"s3Endpoint"}, nil)
listenPort = ":9655"
s3Endpoint = ""
s3AccessKey = ""
s3SecretKey = ""
s3DisableSSL = false
s3BucketName = ""
s3DisableEndpointHostPrefix = false
s3ForcePathStyle = false
s3Region = "us-east-1"
up = prometheus.NewDesc("s3_endpoint_up", "Connection to S3 successful", []string{"s3Endpoint"}, nil)

listenPort string
logLevel string
s3Endpoint string
s3BucketName string
s3AccessKey string
s3SecretKey string
s3Region string
s3DisableSSL bool
s3DisableEndpointHostPrefix bool
s3ForcePathStyle bool
s3Conn controllers.S3Conn
logLevel = "info"
)

func envString(key, def string) string {
@@ -44,16 +45,16 @@ func envBool(key string, def bool) bool {
}

func init() {
flag.StringVar(&s3Endpoint, "s3_endpoint", envString("S3_ENDPOINT", s3Endpoint), "S3_ENDPOINT - eg. myceph.com:7480")
flag.StringVar(&s3AccessKey, "s3_access_key", envString("S3_ACCESS_KEY", s3AccessKey), "S3_ACCESS_KEY - aws_access_key")
flag.StringVar(&s3SecretKey, "s3_secret_key", envString("S3_SECRET_KEY", s3SecretKey), "S3_SECRET_KEY - aws_secret_key")
flag.StringVar(&s3BucketName, "s3_bucket_name", envString("S3_BUCKET_NAME", s3BucketName), "S3_BUCKET_NAME")
flag.StringVar(&s3Region, "s3_region", envString("S3_REGION", s3Region), "S3_REGION")
flag.StringVar(&listenPort, "listen_port", envString("LISTEN_PORT", listenPort), "LISTEN_PORT e.g ':9655'")
flag.StringVar(&logLevel, "log_level", envString("LOG_LEVEL", logLevel), "LOG_LEVEL")
flag.BoolVar(&s3DisableSSL, "s3_disable_ssl", envBool("S3_DISABLE_SSL", s3DisableSSL), "s3 disable ssl")
flag.BoolVar(&s3DisableEndpointHostPrefix, "s3_disable_endpoint_host_prefix", envBool("S3_DISABLE_ENDPOINT_HOST_PREFIX", s3DisableEndpointHostPrefix), "S3_DISABLE_ENDPOINT_HOST_PREFIX")
flag.BoolVar(&s3ForcePathStyle, "s3_force_path_style", envBool("S3_FORCE_PATH_STYLE", s3ForcePathStyle), "S3_FORCE_PATH_STYLE")
flag.StringVar(&s3Endpoint, "s3_endpoint", envString("S3_ENDPOINT", ""), "S3_ENDPOINT - eg. myceph.com:7480")
flag.StringVar(&s3AccessKey, "s3_access_key", envString("S3_ACCESS_KEY", ""), "S3_ACCESS_KEY - aws_access_key")
flag.StringVar(&s3SecretKey, "s3_secret_key", envString("S3_SECRET_KEY", ""), "S3_SECRET_KEY - aws_secret_key")
flag.StringVar(&s3BucketName, "s3_bucket_name", envString("S3_BUCKET_NAME", ""), "S3_BUCKET_NAME")
flag.StringVar(&s3Region, "s3_region", envString("S3_REGION", "us-east-1"), "S3_REGION")
flag.StringVar(&listenPort, "listen_port", envString("LISTEN_PORT", ":9655"), "LISTEN_PORT e.g ':9655'")
flag.StringVar(&logLevel, "log_level", envString("LOG_LEVEL", "info"), "LOG_LEVEL")
flag.BoolVar(&s3DisableSSL, "s3_disable_ssl", envBool("S3_DISABLE_SSL", false), "s3 disable ssl")
flag.BoolVar(&s3DisableEndpointHostPrefix, "s3_disable_endpoint_host_prefix", envBool("S3_DISABLE_ENDPOINT_HOST_PREFIX", false), "S3_DISABLE_ENDPOINT_HOST_PREFIX")
flag.BoolVar(&s3ForcePathStyle, "s3_force_path_style", envBool("S3_FORCE_PATH_STYLE", false), "S3_FORCE_PATH_STYLE")
flag.Parse()
}
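
The init() hunk above resolves every flag default from the environment through envString and envBool, whose bodies are collapsed in this view. The snippet below is only a plausible sketch of those helpers under that assumption, not the repository's actual implementation.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// envString and envBool are plausible implementations of the collapsed
// helpers - an assumption, since this diff hides their bodies.

func envString(key, def string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return def
}

func envBool(key string, def bool) bool {
	if v, ok := os.LookupEnv(key); ok {
		if b, err := strconv.ParseBool(v); err == nil {
			return b
		}
	}
	return def
}

func main() {
	// Mirrors how init() wires flags: the environment value wins, otherwise the default applies.
	fmt.Println(envString("LISTEN_PORT", ":9655"))
	fmt.Println(envBool("S3_DISABLE_SSL", false))
}
```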

@@ -92,7 +93,7 @@ func (c S3Collector) Collect(ch chan<- prometheus.Metric) {
}

ch <- prometheus.MustNewConstMetric(up, prometheus.GaugeValue, float64(s3Status), s3Endpoint)
log.Debug("s3metrics read from s3_endpoint :", s3metrics)
log.Debugf("Metrics fetched from %s: %+v", s3Endpoint, s3metrics)

descS := prometheus.NewDesc("s3_total_size", "S3 Total Bucket Size", []string{"s3Endpoint"}, nil)
descON := prometheus.NewDesc("s3_total_object_number", "S3 Total Object Number", []string{"s3Endpoint"}, nil)
@@ -108,40 +109,30 @@ func (c S3Collector) Collect(ch chan<- prometheus.Metric) {
}
}

func healthHandler(w http.ResponseWriter, r *http.Request) {
func healthHandler(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)

_, err := w.Write([]byte("OK"))
if err != nil {
if _, err := w.Write([]byte("OK")); err != nil {
http.Error(w, "Failed to write response", http.StatusInternalServerError)
log.Fatalf("Error writing response: %v", err)
log.Errorf("Error writing health response: %v", err)
}
}

func main() {
level, err := log.ParseLevel(logLevel)
if err != nil {
log.Fatalf("Invalid log level: %v", logLevel)
log.Fatalf("Invalid log level: %s", logLevel)
}
log.SetLevel(level)

if s3AccessKey == "" || s3SecretKey == "" {
log.Fatal("Missing required S3 configuration")
log.Fatal("S3 access key and secret key are required")
}

c := S3Collector{}
prometheus.MustRegister(c)
prometheus.MustRegister(S3Collector{})

http.Handle("/metrics", promhttp.Handler())
http.HandleFunc("/health", healthHandler)

log.Infof("Beginning to serve on port %s", listenPort)
if s3BucketName != "" {
log.Infof("Monitoring S3 bucket: %s", s3BucketName)
} else {
log.Infof("Monitoring all S3 buckets in the %s region", s3Region)
}

srv := &http.Server{
Addr: listenPort,
Handler: nil,
@@ -150,7 +141,14 @@ func main() {
IdleTimeout: 120 * time.Second,
}

log.Infof("Starting server on %s", listenPort)
if s3BucketName != "" {
log.Infof("Monitoring bucket: %s in region %s", s3BucketName, s3Region)
} else {
log.Infof("Monitoring all buckets in region: %s", s3Region)
}

if err := srv.ListenAndServe(); err != nil {
log.Fatalf("Failed to start server: %v", err)
log.Fatalf("Server failed to start: %v", err)
}
}
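
The diff now registers S3Collector{} directly with prometheus.MustRegister, which requires the type to satisfy the prometheus.Collector interface; its Describe method is not shown in this commit. The sketch below is a generic illustration of that contract with a stand-in collector, not the exporter's actual code.

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// up mirrors the descriptor declared in the diff.
var up = prometheus.NewDesc("s3_endpoint_up", "Connection to S3 successful", []string{"s3Endpoint"}, nil)

// demoCollector is a stand-in for S3Collector, showing the two methods
// prometheus.MustRegister expects.
type demoCollector struct{}

func (c demoCollector) Describe(ch chan<- *prometheus.Desc) {
	// Advertise every metric this collector can emit.
	ch <- up
}

func (c demoCollector) Collect(ch chan<- prometheus.Metric) {
	// The real exporter calls S3UsageInfo here; this sketch emits a constant value.
	ch <- prometheus.MustNewConstMetric(up, prometheus.GaugeValue, 1, "s3.example.local")
}

func main() {
	prometheus.MustRegister(demoCollector{})
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9655", nil) // error ignored for brevity in this sketch
}
```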
