Add --max-log-requests flag to limit concurrent requests (#224) · stern/stern@0b939c5

Commit 0b939c5

Author: Takashi Kusumi

Add --max-log-requests flag to limit concurrent requests (#224)

1 parent 8312782 · commit 0b939c5

4 files changed: +70 -36 lines

README.md

Lines changed: 15 additions & 0 deletions
@@ -85,6 +85,7 @@ Supported Kubernetes resources are `pod`, `replicationcontroller`, `service`, `d
 `--include`, `-i` | `[]` | Log lines to include. (regular expression)
 `--init-containers` | `true` | Include or exclude init containers.
 `--kubeconfig` | | Path to kubeconfig file to use. Default to KUBECONFIG variable then ~/.kube/config path.
+`--max-log-requests` | `-1` | Maximum number of concurrent logs to request. Defaults to 50, but 5 when specifying --no-follow
 `--namespace`, `-n` | | Kubernetes namespace to use. Default to namespace configured in kubernetes context. To specify multiple namespaces, repeat this or set comma-separated value.
 `--no-follow` | `false` | Exit when all logs have been shown.
 `--only-log-lines` | `false` | Print only log lines
@@ -147,6 +148,20 @@ It is useful when you want to know how stern interacts with a Kubernetes API ser
 
 Increasing the verbosity increases the number of logs. `--verbosity 6` would be a good starting point.
 
+### Max log requests
+
+Stern caps the number of concurrent log requests to prevent unintentional load on a cluster.
+The limit can be configured with the `--max-log-requests` flag.
+
+The behavior and the default value depend on the presence of the `--no-follow` flag.
+
+| `--no-follow` | default | behavior |
+|---------------|---------|------------------|
+| specified | 5 | limits the number of concurrent logs to request |
+| not specified | 50 | exits with an error when it reaches the concurrent limit |
+
+The combination of `--max-log-requests 1` and `--no-follow` is helpful if you want to show logs in order.
+
 ## Examples:
 
 Tail the `gateway` container running inside of the `envvars` pod on staging
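The new flag composes with the README's existing examples. As a usage sketch (reusing the `envvars` pod query from the example above; adjust it to your cluster), forcing a single concurrent request together with `--no-follow` prints each pod's logs one after another:

```sh
# Fetch logs from matching pods one request at a time, in order, then exit.
stern envvars --no-follow --max-log-requests 1
```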

cmd/cmd.go

Lines changed: 13 additions & 0 deletions
@@ -69,6 +69,7 @@ type options struct {
 	resource       string
 	verbosity      int
 	onlyLogLines   bool
+	maxLogRequests int
 }
 
 func NewOptions(streams genericclioptions.IOStreams) *options {
@@ -88,6 +89,7 @@ func NewOptions(streams genericclioptions.IOStreams) *options {
 		timezone:       "Local",
 		prompt:         false,
 		noFollow:       false,
+		maxLogRequests: -1,
 	}
 }
 
@@ -313,6 +315,15 @@ func (o *options) sternConfig() (*stern.Config, error) {
 		return nil, err
 	}
 
+	maxLogRequests := o.maxLogRequests
+	if maxLogRequests == -1 {
+		if o.noFollow {
+			maxLogRequests = 5
+		} else {
+			maxLogRequests = 50
+		}
+	}
+
 	return &stern.Config{
 		KubeConfig:  o.kubeConfig,
 		ContextName: o.context,
@@ -337,6 +348,7 @@ func (o *options) sternConfig() (*stern.Config, error) {
 		Follow:       !o.noFollow,
 		Resource:     o.resource,
 		OnlyLogLines: o.onlyLogLines,
+		MaxLogRequests: maxLogRequests,
 
 		Out:    o.Out,
 		ErrOut: o.ErrOut,
@@ -375,6 +387,7 @@ func (o *options) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&o.kubeConfig, "kube-config", o.kubeConfig, "Path to kubeconfig file to use.")
 	_ = fs.MarkDeprecated("kube-config", "Use --kubeconfig instead.")
 	fs.StringSliceVarP(&o.namespaces, "namespace", "n", o.namespaces, "Kubernetes namespace to use. Default to namespace configured in kubernetes context. To specify multiple namespaces, repeat this or set comma-separated value.")
+	fs.IntVar(&o.maxLogRequests, "max-log-requests", o.maxLogRequests, "Maximum number of concurrent logs to request. Defaults to 50, but 5 when specifying --no-follow")
 	fs.StringVarP(&o.output, "output", "o", o.output, "Specify predefined template. Currently support: [default, raw, json, extjson, ppextjson]")
 	fs.BoolVarP(&o.prompt, "prompt", "p", o.prompt, "Toggle interactive prompt for selecting 'app.kubernetes.io/instance' label values.")
 	fs.StringVarP(&o.selector, "selector", "l", o.selector, "Selector (label query) to filter on. If present, default to \".*\" for the pod-query.")
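A minimal standalone sketch of the sentinel-default pattern used above (a hypothetical demo program, not stern's code): `-1` means the user did not set the flag, and the effective default is resolved only after parsing, once `--no-follow` is known:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
	maxLogRequests := fs.Int("max-log-requests", -1, "Maximum number of concurrent logs to request.")
	noFollow := fs.Bool("no-follow", false, "Exit when all logs have been shown.")
	_ = fs.Parse([]string{"--no-follow"}) // simulate a user passing only --no-follow

	// -1 is a sentinel for "unset": resolve it to a mode-dependent default.
	limit := *maxLogRequests
	if limit == -1 {
		if *noFollow {
			limit = 5
		} else {
			limit = 50
		}
	}
	fmt.Println(limit) // prints 5
}
```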

stern/config.go

Lines changed: 1 addition & 0 deletions
@@ -49,6 +49,7 @@ type Config struct {
 	Follow       bool
 	Resource     string
 	OnlyLogLines bool
+	MaxLogRequests int
 
 	Out    io.Writer
 	ErrOut io.Writer

stern/stern.go

Lines changed: 41 additions & 36 deletions
@@ -21,6 +21,8 @@ import (
 	"strings"
 	"time"
 
+	"sync/atomic"
+
 	"github.com/pkg/errors"
 
 	"github.com/stern/stern/kubernetes"
@@ -107,6 +109,7 @@ func Run(ctx context.Context, config *Config) error {
 
 	if !config.Follow {
 		var eg errgroup.Group
+		eg.SetLimit(config.MaxLogRequests)
 		for _, n := range namespaces {
 			selector, err := chooseSelector(ctx, client, n, resource.kind, resource.name, config.LabelSelector)
 			if err != nil {
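For reference, a minimal sketch of how `errgroup.SetLimit` bounds concurrency in no-follow mode (illustrative tasks, not stern's code): once the limit of goroutines is active, further `Go` calls block until one finishes, so at most `limit` requests run at a time:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	var eg errgroup.Group
	eg.SetLimit(2) // at most 2 goroutines run concurrently; further Go calls block

	for i := 0; i < 5; i++ {
		i := i
		eg.Go(func() error {
			time.Sleep(100 * time.Millisecond) // stand-in for one log request
			fmt.Println("done", i)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```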
@@ -133,12 +136,33 @@
 		return eg.Wait()
 	}
 
-	added := make(chan *Target)
-	errCh := make(chan error)
+	tailTarget := func(ctx context.Context, target *Target) {
+		// We use a rate limiter to prevent a burst of retries.
+		// It also enables us to retry immediately, in most cases,
+		// when it is disconnected on the way.
+		limiter := rate.NewLimiter(rate.Every(time.Second*10), 3)
+		for {
+			if err := limiter.Wait(ctx); err != nil {
+				fmt.Fprintf(config.ErrOut, "failed to retry: %v\n", err)
+				return
+			}
+			tail := newTail(target)
+			err := tail.Start(ctx)
+			tail.Close()
+			if err == nil {
+				return
+			}
+			if !filter.isActive(target) {
+				fmt.Fprintf(config.ErrOut, "failed to tail: %v\n", err)
+				return
+			}
+			fmt.Fprintf(config.ErrOut, "failed to tail: %v, will retry\n", err)
+		}
+	}
 
-	defer close(added)
+	var numRequests atomic.Int64
+	errCh := make(chan error)
 	defer close(errCh)
-
 	for _, n := range namespaces {
 		selector, err := chooseSelector(ctx, client, n, resource.kind, resource.name, config.LabelSelector)
 		if err != nil {
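The retry loop in `tailTarget` can be illustrated standalone with `golang.org/x/time/rate` (`doTail` below is a hypothetical stand-in for one tailing attempt): a burst of 3 allows the first retries to fire immediately, after which one retry token refills every 10 seconds, preventing a retry storm against the API server:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// doTail is a hypothetical stand-in for one tailing attempt.
func doTail(ctx context.Context) error { return errors.New("disconnected") }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Burst of 3 lets the first retries happen immediately;
	// afterwards one retry token refills every 10 seconds.
	limiter := rate.NewLimiter(rate.Every(10*time.Second), 3)
	for {
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("giving up:", err) // context canceled or deadline exceeded
			return
		}
		if err := doTail(ctx); err == nil {
			return
		}
		fmt.Println("failed to tail, will retry")
	}
}
```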
@@ -157,49 +181,30 @@
 		go func() {
 			for {
 				select {
-				case v, ok := <-a:
+				case target, ok := <-a:
 					if !ok {
 						errCh <- fmt.Errorf("lost watch connection")
 						return
 					}
-					added <- v
+					numRequests.Add(1)
+					if numRequests.Load() > int64(config.MaxLogRequests) {
+						errCh <- fmt.Errorf(
+							"stern reached the maximum number of log requests (%d),"+
+								" use --max-log-requests to increase the limit",
+							config.MaxLogRequests)
+						return
+					}
+					go func() {
+						tailTarget(ctx, target)
+						numRequests.Add(-1)
+					}()
 				case <-ctx.Done():
 					return
 				}
 			}
 		}()
 	}
 
-	addTarget := func(ctx context.Context, target *Target) {
-		// We use a rate limiter to prevent a burst of retries.
-		// It also enables us to retry immediately, in most cases,
-		// when it is disconnected on the way.
-		limiter := rate.NewLimiter(rate.Every(time.Second*10), 3)
-		for {
-			if err := limiter.Wait(ctx); err != nil {
-				fmt.Fprintf(config.ErrOut, "failed to retry: %v\n", err)
-				return
-			}
-			tail := newTail(target)
-			err := tail.Start(ctx)
-			tail.Close()
-			if err == nil {
-				return
-			}
-			if !filter.isActive(target) {
-				fmt.Fprintf(config.ErrOut, "failed to tail: %v\n", err)
-				return
-			}
-			fmt.Fprintf(config.ErrOut, "failed to tail: %v, will retry\n", err)
-		}
-	}
-
-	go func() {
-		for target := range added {
-			go addTarget(ctx, target)
-		}
-	}()
-
 	select {
 	case e := <-errCh:
 		return e