| idx (int64, 0–41.8k) | question (string, length 69–3.84k) | target (string, length 11–1.18k) |
|---|---|---|
0
|
func ( mock * Mock ) WriteFile ( file string , data [ ] byte ) error { items := mock . getReturnValues ( "WriteFile" )
return items [ 0 ] . ( error )
}
|
WriteFile mocks original method
|
1
|
func ( mock * Mock ) FileExists ( file string ) bool { items := mock . getReturnValues ( "FileExists" )
return items [ 0 ] . ( bool )
}
|
FileExists mocks original method
|
2
|
func ( mock * Mock ) GetFileNames ( paths [ ] string ) ( [ ] string , error ) { items := mock . getReturnValues ( "GetFileNames" )
if len ( items ) == 1 { switch typed := items [ 0 ] . ( type ) { case [ ] string : return typed , nil
case error : return [ ] string { } , typed
}
} else if len ( items ) == 2 { return items [ 0 ] . ( [ ] string ) , items [ 1 ] . ( error )
}
return [ ] string { } , nil
}
|
GetFileNames mocks original method
|
3
|
func ( mock * Mock ) Watch ( paths [ ] string , onEvent func ( event fsnotify . Event ) , onClose func ( ) ) error { go func ( ) { for { select { case event , ok := <- mock . eventChan : if ! ok { onClose ( )
return
}
onEvent ( event )
}
}
} ( )
return nil
}
|
Watch calls onEvent when event arrives and onClose when channel is closed
|
4
|
func NewAuthenticator ( router * mux . Router , ctx * Settings , log logging . Logger ) AuthenticatorAPI { a := & authenticator { router : router , log : log , formatter : render . New ( render . Options { IndentJSON : true , } ) , groupDb : make ( map [ string ] [ ] * access . PermissionGroup_Permissions ) , expTime : ctx . ExpTime , }
if ctx . AuthStore != nil { a . userDb = ctx . AuthStore
} else { a . userDb = CreateDefaultAuthDB ( )
}
signature = ctx . Signature
if a . expTime == 0 { a . expTime = defaultExpTime
a . log . Debugf ( "Token expiration time claim not set, defaulting to 1 hour" )
}
hash := "$2a$10$q5s1LP7xbCJWJlLet1g/h.rGrsHtciILps90bNRdJ.6DRekw9b.zK"
if err := a . userDb . AddUser ( admin , hash , [ ] string { admin } ) ; err != nil { a . log . Errorf ( "failed to add admin user: %v" , err )
}
for _ , user := range ctx . Users { if user . Name == admin { a . log . Errorf ( "rejected to create user-defined account named 'admin'" )
continue
}
if err := a . userDb . AddUser ( user . Name , user . PasswordHash , user . Permissions ) ; err != nil { a . log . Errorf ( "failed to add user %s: %v" , user . Name , err )
continue
}
a . log . Debug ( "Registered user %s, permissions: %v" , user . Name , user . Permissions )
}
a . groupDb [ admin ] = [ ] * access . PermissionGroup_Permissions { }
a . registerSecurityHandlers ( )
return a
}
|
NewAuthenticator prepares new instance of authenticator.
|
5
|
func ( a * authenticator ) AddPermissionGroup ( group ... * access . PermissionGroup ) { for _ , newPermissionGroup := range group { if _ , ok := a . groupDb [ newPermissionGroup . Name ] ; ok { a . log . Warnf ( "permission group %s already exists, skipped" )
continue
}
a . log . Debugf ( "added HTTP permission group %s" , newPermissionGroup . Name )
a . groupDb [ newPermissionGroup . Name ] = newPermissionGroup . Permissions
}
}
|
AddPermissionGroup adds new permission group.
|
6
|
func ( a * authenticator ) Validate ( provider http . HandlerFunc ) http . HandlerFunc { return http . HandlerFunc ( func ( w http . ResponseWriter , req * http . Request ) { tokenString , errCode , err := a . getTokenStringFromRequest ( req )
if err != nil { a . formatter . Text ( w , errCode , err . Error ( ) )
return
}
token , err := jwt . Parse ( tokenString , func ( token * jwt . Token ) ( interface { } , error ) { if _ , ok := jwt . GetSigningMethod ( token . Header [ "alg" ] . ( string ) ) . ( * jwt . SigningMethodHMAC ) ; ! ok { return nil , fmt . Errorf ( "error parsing token" )
}
return [ ] byte ( signature ) , nil
} )
if err != nil { errStr := fmt . Sprintf ( "500 internal server error: %s" , err )
a . formatter . Text ( w , http . StatusInternalServerError , errStr )
return
}
if token . Claims != nil { if err := token . Claims . Valid ( ) ; err != nil { errStr := fmt . Sprintf ( "401 Unauthorized: %v" , err )
a . formatter . Text ( w , http . StatusUnauthorized , errStr )
return
}
}
if err := a . validateToken ( token , req . URL . Path , req . Method ) ; err != nil { errStr := fmt . Sprintf ( "401 Unauthorized: %v" , err )
a . formatter . Text ( w , http . StatusUnauthorized , errStr )
return
}
provider . ServeHTTP ( w , req )
} )
}
|
Validate the request
|
7
|
func ( a * authenticator ) registerSecurityHandlers ( ) { a . router . HandleFunc ( login , a . loginHandler ) . Methods ( http . MethodGet , http . MethodPost )
a . router . HandleFunc ( authenticate , a . authenticationHandler ) . Methods ( http . MethodPost )
a . router . HandleFunc ( logout , a . logoutHandler ) . Methods ( http . MethodPost )
}
|
Register authenticator-wide security handlers
|
8
|
func ( a * authenticator ) loginHandler ( w http . ResponseWriter , req * http . Request ) { if req . Method == http . MethodGet { r := render . New ( render . Options { Directory : "templates" , Asset : Asset , AssetNames : AssetNames , } )
r . HTML ( w , http . StatusOK , "login" , nil )
} else { credentials := & credentials { }
decoder := json . NewDecoder ( req . Body )
err := decoder . Decode ( & credentials )
if err != nil { errStr := fmt . Sprintf ( "500 internal server error: failed to decode json: %v" , err )
a . formatter . Text ( w , http . StatusInternalServerError , errStr )
return
}
token , errCode , err := a . getTokenFor ( credentials )
if err != nil { a . formatter . Text ( w , errCode , err . Error ( ) )
return
}
a . formatter . Text ( w , http . StatusOK , token )
}
}
|
Login handler shows simple page to log in
|
9
|
func ( a * authenticator ) logoutHandler ( w http . ResponseWriter , req * http . Request ) { decoder := json . NewDecoder ( req . Body )
var credentials credentials
err := decoder . Decode ( & credentials )
if err != nil { errStr := fmt . Sprintf ( "500 internal server error: failed to decode json: %v" , err )
a . formatter . Text ( w , http . StatusInternalServerError , errStr )
return
}
a . userDb . SetLogoutTime ( credentials . Username )
a . log . Debugf ( "user %s was logged out" , credentials . Username )
}
|
Removes token endpoint from the DB. During processing, the token will not be found and will be considered invalid.
|
10
|
func ( a * authenticator ) getTokenStringFromRequest ( req * http . Request ) ( result string , errCode int , err error ) { authHeader := req . Header . Get ( AuthHeaderKey )
if authHeader != "" { bearerToken := strings . Split ( authHeader , " " )
if len ( bearerToken ) != 2 { return "" , http . StatusUnauthorized , fmt . Errorf ( "401 Unauthorized: invalid authorization token" )
}
if bearerToken [ 0 ] != "Bearer" { return "" , http . StatusUnauthorized , fmt . Errorf ( "401 Unauthorized: invalid authorization header" )
}
return bearerToken [ 1 ] , 0 , nil
}
a . log . Debugf ( "Authentication header not found (err: %v)" , err )
cookie , err := req . Cookie ( cookieName )
if err == nil && cookie != nil { return cookie . Value , 0 , nil
}
a . log . Debugf ( "Authentication cookie not found (err: %v)" , err )
return "" , http . StatusUnauthorized , fmt . Errorf ( "401 Unauthorized: authorization required" )
}
|
Read raw token from request.
|
11
|
func ( a * authenticator ) getTokenFor ( credentials * credentials ) ( string , int , error ) { name , errCode , err := a . validateCredentials ( credentials )
if err != nil { return "" , errCode , err
}
claims := jwt . StandardClaims { Audience : name , ExpiresAt : a . expTime . Nanoseconds ( ) , }
token := jwt . NewWithClaims ( jwt . SigningMethodHS256 , claims )
tokenString , err := token . SignedString ( [ ] byte ( signature ) )
if err != nil { return "" , http . StatusInternalServerError , fmt . Errorf ( "500 internal server error: failed to sign token: %v" , err )
}
a . userDb . SetLoginTime ( name )
a . log . Debugf ( "user %s was logged in" , name )
return tokenString , 0 , nil
}
|
Get token for credentials
|
12
|
func ( a * authenticator ) validateToken ( token * jwt . Token , url , method string ) error { var userName string
switch v := token . Claims . ( type ) { case jwt . MapClaims : var ok bool
if userName , ok = v [ "aud" ] . ( string ) ; ! ok { return fmt . Errorf ( "failed to validate token claims audience" )
}
case jwt . StandardClaims : userName = v . Audience
default : return fmt . Errorf ( "failed to validate token claims" )
}
loggedOut , err := a . userDb . IsLoggedOut ( userName )
if err != nil { return fmt . Errorf ( "failed to validate token: %v" , err )
}
if loggedOut { token . Valid = false
return fmt . Errorf ( "invalid token" )
}
user , err := a . userDb . GetUser ( userName )
if err != nil { return fmt . Errorf ( "failed to validate token: %v" , err )
}
if userIsAdmin ( user ) { return nil
}
perms := a . getPermissionsForURL ( url , method )
for _ , userPerm := range user . Permissions { for _ , perm := range perms { if userPerm == perm { return nil
}
}
}
return fmt . Errorf ( "not permitted" )
}
|
Validates token itself and permissions
|
13
|
func userIsAdmin ( user * User ) bool { for _ , permission := range user . Permissions { if permission == admin { return true
}
}
return false
}
|
Checks user admin permission
|
14
|
func ( plugin * ExamplePlugin ) AfterInit ( ) ( err error ) { late := plugin . Log . NewLogger ( "late" )
late . Debugf ( "late debug message" )
plugin . Log . Info ( "logs in plugin example finished, sending shutdown ..." )
close ( plugin . exampleFinished )
return nil
}
|
AfterInit demonstrates the usage of PluginLogger API.
|
15
|
func ( plugin * ExamplePlugin ) showPanicLog ( ) { defer func ( ) { if err := recover ( ) ; err != nil { plugin . Log . Info ( "Recovered from panic" )
}
} ( )
plugin . Log . Panic ( "Panic log: calls panic() after log, will be recovered" )
}
|
showPanicLog demonstrates panic log + recovering.
|
16
|
func ( d * PluginDeps ) SetupLog ( ) { if d . Log == nil { d . Log = logging . ForPlugin ( d . String ( ) )
}
}
|
SetupLog sets up default instance for plugin log dep.
|
17
|
func ( d * PluginDeps ) Setup ( ) { d . SetupLog ( )
if d . Cfg == nil { d . Cfg = config . ForPlugin ( d . String ( ) )
}
}
|
Setup sets up default instances for plugin deps.
|
18
|
func ( r * PrevRevisions ) Get ( key string ) ( found bool , value datasync . KeyVal ) { r . mu . RLock ( )
prev , found := r . revisions [ key ]
r . mu . RUnlock ( )
return found , prev
}
|
Get gets the last proto.Message with its revision.
|
19
|
func ( r * PrevRevisions ) Put ( key string , val datasync . LazyValue ) ( found bool , prev datasync . KeyVal , currRev int64 ) { found , prev = r . Get ( key )
if prev != nil { currRev = prev . GetRevision ( ) + 1
} else { currRev = 0
}
r . mu . Lock ( )
r . revisions [ key ] = & valWithRev { LazyValue : val , key : key , rev : currRev , }
r . mu . Unlock ( )
return found , prev , currRev
}
|
Put updates the entry in the revisions and returns previous value.
|
20
|
func ( r * PrevRevisions ) PutWithRevision ( key string , inCurrent datasync . KeyVal ) ( found bool , prev datasync . KeyVal ) { found , prev = r . Get ( key )
currentRev := inCurrent . GetRevision ( )
if currentRev == 0 && prev != nil { currentRev = prev . GetRevision ( ) + 1
}
r . mu . Lock ( )
r . revisions [ key ] = & valWithRev { LazyValue : inCurrent , key : key , rev : currentRev , }
r . mu . Unlock ( )
return found , prev
}
|
PutWithRevision updates the entry in the revisions and returns previous value.
|
21
|
func ( r * PrevRevisions ) Del ( key string ) ( found bool , prev datasync . KeyVal ) { found , prev = r . Get ( key )
if found { r . mu . Lock ( )
delete ( r . revisions , key )
r . mu . Unlock ( )
}
return found , prev
}
|
Del deletes the entry from revisions and returns previous value.
|
22
|
func ( r * PrevRevisions ) ListKeys ( ) ( ret [ ] string ) { r . mu . RLock ( )
for key := range r . revisions { ret = append ( ret , key )
}
r . mu . RUnlock ( )
return ret
}
|
ListKeys returns all stored keys.
|
23
|
func ( r * PrevRevisions ) Cleanup ( ) { r . mu . Lock ( )
defer r . mu . Unlock ( )
r . revisions = make ( map [ string ] datasync . KeyVal )
}
|
Cleanup removes all data from the registry
|
24
|
func ( plugin * ExamplePlugin ) Close ( ) error { if plugin . db != nil { return plugin . db . Close ( )
}
return nil
}
|
Close closes ExamplePlugin
|
25
|
func ( plugin * ExamplePlugin ) etcdKey ( label string ) string { return "/vnf-agent/" + plugin . ServiceLabel . GetAgentLabel ( ) + "/api/v1/example/db/simple/" + label
}
|
The ETCD key prefix used for this example
|
26
|
func ( plugin * ExamplePlugin ) encryptData ( value string , publicKey * rsa . PublicKey ) ( string , error ) { encryptedValue , err := plugin . CryptoData . EncryptData ( [ ] byte ( value ) , publicKey )
if err != nil { return "" , err
}
return base64 . URLEncoding . EncodeToString ( encryptedValue ) , nil
}
|
encryptData first encrypts the provided value using crypto layer and then encodes the data with base64 for JSON compatibility
|
27
|
func ( plugin * ExamplePlugin ) newEtcdConnection ( configPath string ) ( * etcd . BytesConnectionEtcd , error ) { etcdFileConfig := & etcd . Config { }
err := config . ParseConfigFromYamlFile ( configPath , etcdFileConfig )
if err != nil { return nil , err
}
etcdConfig , err := etcd . ConfigToClient ( etcdFileConfig )
if err != nil { return nil , err
}
return etcd . NewEtcdConnectionWithBytes ( * etcdConfig , plugin . Log )
}
|
newEtcdConnection creates new ETCD bytes connection from provided etcd config path
|
28
|
func readPublicKey ( path string ) ( * rsa . PublicKey , error ) { bytes , err := ioutil . ReadFile ( path )
if err != nil { return nil , err
}
block , _ := pem . Decode ( bytes )
if block == nil { return nil , errors . New ( "failed to decode PEM for key " + path )
}
pubInterface , err := x509 . ParsePKIXPublicKey ( block . Bytes )
if err != nil { return nil , err
}
publicKey , ok := pubInterface . ( * rsa . PublicKey )
if ! ok { return nil , errors . New ( "failed to convert public key to rsa.PublicKey" )
}
return publicKey , nil
}
|
readPublicKey reads rsa public key from PEM file on provided path
|
29
|
func NewKvBytesPluginWrapper ( cbw keyval . KvBytesPlugin , decrypter ArbitraryDecrypter , decryptFunc DecryptFunc ) * KvBytesPluginWrapper { return & KvBytesPluginWrapper { KvBytesPlugin : cbw , decryptData : decryptData { decryptFunc : decryptFunc , decrypter : decrypter , } , }
}
|
NewKvBytesPluginWrapper creates wrapper for provided CoreBrokerWatcher adding support for decrypting encrypted data
|
30
|
func NewBytesBrokerWrapper ( pb keyval . BytesBroker , decrypter ArbitraryDecrypter , decryptFunc DecryptFunc ) * BytesBrokerWrapper { return & BytesBrokerWrapper { BytesBroker : pb , decryptData : decryptData { decryptFunc : decryptFunc , decrypter : decrypter , } , }
}
|
NewBytesBrokerWrapper creates wrapper for provided BytesBroker adding support for decrypting encrypted data
|
31
|
func NewBytesWatcherWrapper ( pb keyval . BytesWatcher , decrypter ArbitraryDecrypter , decryptFunc DecryptFunc ) * BytesWatcherWrapper { return & BytesWatcherWrapper { BytesWatcher : pb , decryptData : decryptData { decryptFunc : decryptFunc , decrypter : decrypter , } , }
}
|
NewBytesWatcherWrapper creates wrapper for provided BytesWatcher adding support for decrypting encrypted data
|
32
|
func ( cbb * BytesBrokerWrapper ) GetValue ( key string ) ( data [ ] byte , found bool , revision int64 , err error ) { data , found , revision , err = cbb . BytesBroker . GetValue ( key )
if err == nil { objData , err := cbb . decrypter . Decrypt ( data , cbb . decryptFunc )
if err != nil { return data , found , revision , err
}
outData , ok := objData . ( [ ] byte )
if ! ok { return data , found , revision , err
}
return outData , found , revision , err
}
return
}
|
GetValue retrieves and tries to decrypt one item under the provided key.
|
33
|
func ListenAndServe ( config Config , handler http . Handler ) ( srv * http . Server , err error ) { server := & http . Server { Addr : config . Endpoint , ReadTimeout : config . ReadTimeout , ReadHeaderTimeout : config . ReadHeaderTimeout , WriteTimeout : config . WriteTimeout , IdleTimeout : config . IdleTimeout , MaxHeaderBytes : config . MaxHeaderBytes , Handler : handler , }
if len ( config . ClientCerts ) > 0 { caCertPool := x509 . NewCertPool ( )
for _ , c := range config . ClientCerts { caCert , err := ioutil . ReadFile ( c )
if err != nil { return nil , err
}
caCertPool . AppendCertsFromPEM ( caCert )
}
server . TLSConfig = & tls . Config { ClientAuth : tls . RequireAndVerifyClientCert , ClientCAs : caCertPool , }
}
ln , err := net . Listen ( "tcp" , server . Addr )
if err != nil { return nil , err
}
l := tcpKeepAliveListener { ln . ( * net . TCPListener ) }
go func ( ) { var err error
if config . UseHTTPS ( ) { err = server . ServeTLS ( l , config . ServerCertfile , config . ServerKeyfile )
} else { err = server . Serve ( l )
}
logging . DefaultLogger . Debugf ( "HTTP server Serve: %v" , err )
} ( )
return server , nil
}
|
ListenAndServe starts an HTTP server.
|
34
|
func ( sp * SerializerProto ) Unmarshal ( data [ ] byte , protoData proto . Message ) error { return proto . Unmarshal ( data , protoData )
}
|
Unmarshal deserializes data from slice of bytes into the provided protobuf message using proto marshaller.
|
35
|
func ( sp * SerializerProto ) Marshal ( message proto . Message ) ( [ ] byte , error ) { return proto . Marshal ( message )
}
|
Marshal serializes data from proto message to the slice of bytes using proto marshaller.
|
36
|
func ( sj * SerializerJSON ) Unmarshal ( data [ ] byte , protoData proto . Message ) error { return jsonpb . Unmarshal ( bytes . NewBuffer ( data ) , protoData )
}
|
Unmarshal deserializes data from slice of bytes into the provided protobuf message using jsonpb marshaller to correctly unmarshal protobuf data.
|
37
|
func ( sj * SerializerJSON ) Marshal ( message proto . Message ) ( [ ] byte , error ) { if message == nil { return [ ] byte ( "null" ) , nil
}
var buf bytes . Buffer
if err := DefaultMarshaler . Marshal ( & buf , message ) ; err != nil { return nil , err
}
return buf . Bytes ( ) , nil
}
|
Marshal serializes proto message to the slice of bytes using jsonpb marshaller to correctly marshal protobuf data.
|
38
|
func ( keys * watchBrokerKeys ) watchResync ( resyncReg resync . Registration ) { for resyncStatus := range resyncReg . StatusChan ( ) { if resyncStatus . ResyncStatus ( ) == resync . Started { err := keys . resync ( )
if err != nil { logrus . DefaultLogger ( ) . Errorf ( "getting resync data failed: %v" , err )
}
}
resyncStatus . Ack ( )
}
}
|
resyncReg.StatusChan == Started => resync
|
39
|
func ( keys * watchBrokerKeys ) resyncRev ( ) error { for _ , keyPrefix := range keys . prefixes { revIt , err := keys . adapter . db . ListValues ( keyPrefix )
if err != nil { return err
}
for { data , stop := revIt . GetNext ( )
if stop { break
}
logrus . DefaultLogger ( ) . Debugf ( "registering key found in KV: %q" , data . GetKey ( ) )
keys . adapter . base . LastRev ( ) . PutWithRevision ( data . GetKey ( ) , syncbase . NewKeyVal ( data . GetKey ( ) , data , data . GetRevision ( ) ) )
}
}
return nil
}
|
ResyncRev fills the PrevRevision map. This step needs to be done even if resync is omitted.
|
40
|
func NewConf ( ) * Config { return & Config { DefaultLevel : "" , Loggers : [ ] LoggerConfig { } , Hooks : make ( map [ string ] HookConfig ) , }
}
|
NewConf creates default configuration with InfoLevel & empty loggers. Suitable also for usage in flavor to programmatically specify default behavior.
|
41
|
func ( c * DbClient ) Add ( path string , entry * decoder . FileDataEntry ) { c . Lock ( )
defer c . Unlock ( )
if entry == nil { return
}
fileData , ok := c . db [ path ]
if ok { value , ok := fileData [ entry . Key ]
if ok { if ! bytes . Equal ( value . data , entry . Value ) { rev := value . rev + 1
fileData [ entry . Key ] = & dbEntry { entry . Value , rev }
}
} else { fileData [ entry . Key ] = & dbEntry { entry . Value , initialRev }
}
} else { fileData = map [ string ] * dbEntry { entry . Key : { entry . Value , initialRev } }
}
c . db [ path ] = fileData
}
|
Add puts new entry to the database or updates the old one if given key already exists
|
42
|
func ( c * DbClient ) Delete ( path , key string ) { c . Lock ( )
defer c . Unlock ( )
fileData , ok := c . db [ path ]
if ! ok { return
}
delete ( fileData , key )
}
|
Delete removes key in given path.
|
43
|
func ( c * DbClient ) DeleteFile ( path string ) { c . Lock ( )
defer c . Unlock ( )
delete ( c . db , path )
}
|
DeleteFile removes file entry including all keys within
|
44
|
func ( c * DbClient ) GetDataForPrefix ( prefix string ) [ ] * decoder . FileDataEntry { c . Lock ( )
defer c . Unlock ( )
var keyValues [ ] * decoder . FileDataEntry
for _ , file := range c . db { for key , value := range file { if strings . HasPrefix ( key , prefix ) { keyValues = append ( keyValues , & decoder . FileDataEntry { Key : key , Value : value . data , } )
}
}
}
return keyValues
}
|
GetDataForPrefix returns all values which match provided prefix
|
45
|
func ( c * DbClient ) GetDataForFile ( path string ) [ ] * decoder . FileDataEntry { c . Lock ( )
defer c . Unlock ( )
var keyValues [ ] * decoder . FileDataEntry
if dbKeyValues , ok := c . db [ path ] ; ok { for key , value := range dbKeyValues { keyValues = append ( keyValues , & decoder . FileDataEntry { Key : key , Value : value . data , } )
}
}
return keyValues
}
|
GetDataForFile returns a map of key-value entries from given file
|
46
|
func ( c * DbClient ) GetDataForKey ( key string ) ( * decoder . FileDataEntry , bool ) { c . Lock ( )
defer c . Unlock ( )
for _ , file := range c . db { value , ok := file [ key ]
if ok { return & decoder . FileDataEntry { Key : key , Value : value . data , } , true
}
}
return nil , false
}
|
GetDataForKey returns data for given key.
|
47
|
func ConfigToClientConfig ( ymlConfig * Config ) ( * ClientConfig , error ) { timeout := defaultOpTimeout
if ymlConfig . OpTimeout > 0 { timeout = ymlConfig . OpTimeout
}
connectTimeout := defaultDialTimeout
if ymlConfig . DialTimeout > 0 { connectTimeout = ymlConfig . DialTimeout
}
reconnectInterval := defaultRedialInterval
if ymlConfig . RedialInterval > 0 { reconnectInterval = ymlConfig . RedialInterval
}
protoVersion := defaultProtocolVersion
if ymlConfig . ProtocolVersion > 0 { protoVersion = ymlConfig . ProtocolVersion
}
endpoints , port , err := getEndpointsAndPort ( ymlConfig . Endpoints )
if err != nil { return nil , err
}
var sslOpts * gocql . SslOptions
if ymlConfig . TLS . Enabled { sslOpts = & gocql . SslOptions { CaPath : ymlConfig . TLS . CAfile , CertPath : ymlConfig . TLS . Certfile , KeyPath : ymlConfig . TLS . Keyfile , EnableHostVerification : ymlConfig . TLS . EnableHostVerification , }
}
clientConfig := & gocql . ClusterConfig { Hosts : endpoints , Port : port , Timeout : timeout * time . Millisecond , ConnectTimeout : connectTimeout * time . Millisecond , ReconnectInterval : reconnectInterval * time . Second , ProtoVersion : protoVersion , SslOpts : sslOpts , }
cfg := & ClientConfig { ClusterConfig : clientConfig }
return cfg , nil
}
|
ConfigToClientConfig transforms the yaml configuration into ClientConfig. If the configuration of endpoints is invalid, error ErrInvalidEndpointConfig is returned.
|
48
|
func ListenAndServe ( cfg * Config , srv * grpc . Server ) ( netListener net . Listener , err error ) { switch socketType := cfg . getSocketType ( ) ; socketType { case "unix" , "unixpacket" : permissions , err := getUnixSocketFilePermissions ( cfg . Permission )
if err != nil { return nil , err
}
if err := checkUnixSocketFileAndDirectory ( cfg . Endpoint , cfg . ForceSocketRemoval ) ; err != nil { return nil , err
}
netListener , err = net . Listen ( socketType , cfg . Endpoint )
if err != nil { return nil , err
}
if err := os . Chmod ( cfg . Endpoint , permissions ) ; err != nil { return nil , err
}
default : netListener , err = net . Listen ( socketType , cfg . Endpoint )
if err != nil { return nil , err
}
}
go func ( ) { err := srv . Serve ( netListener )
logging . DefaultLogger . Debugf ( "GRPC server Serve: %v" , err )
} ( )
return netListener , nil
}
|
ListenAndServe starts the configured listener and begins serving clients
|
49
|
func getUnixSocketFilePermissions ( permissions int ) ( os . FileMode , error ) { if permissions > 0 { if permissions > 7777 { return 0 , fmt . Errorf ( "incorrect unix socket file/path permission value '%d'" , permissions )
}
mode , err := strconv . ParseInt ( strconv . Itoa ( permissions ) , 8 , 32 )
if err != nil { return 0 , fmt . Errorf ( "failed to parse socket file permissions %d" , permissions )
}
return os . FileMode ( mode ) , nil
}
return os . ModePerm , nil
}
|
Resolve permissions and return FileMode
|
50
|
func newRegistration ( resyncName string , statusChan chan StatusEvent ) * registration { return & registration { resyncName : resyncName , statusChan : statusChan }
}
|
newRegistration is a constructor.
|
51
|
func newStatusEvent ( status Status ) * statusEvent { return & statusEvent { status : status , ackChan : make ( chan time . Time ) }
}
|
newStatusEvent is a constructor.
|
52
|
func NewLogRegistry ( ) logging . Registry { registry := & logRegistry { loggers : new ( sync . Map ) , logLevels : make ( map [ string ] logrus . Level ) , defaultLevel : initialLogLvl , }
registry . putLoggerToMapping ( defaultLogger )
return registry
}
|
NewLogRegistry is a constructor
|
53
|
func ( lr * logRegistry ) NewLogger ( name string ) logging . Logger { if existingLogger := lr . getLoggerFromMapping ( name ) ; existingLogger != nil { panic ( fmt . Errorf ( "logger with name '%s' already exists" , name ) )
}
if err := checkLoggerName ( name ) ; err != nil { panic ( err )
}
logger := NewLogger ( name )
if lvl , ok := lr . logLevels [ name ] ; ok { setLevel ( logger , lvl )
} else { setLevel ( logger , lr . defaultLevel )
}
lr . putLoggerToMapping ( logger )
for _ , hook := range lr . hooks { logger . std . AddHook ( hook )
}
return logger
}
|
NewLogger creates new named Logger instance. Name can be subsequently used to refer to the logger in registry.
|
54
|
func ( lr * logRegistry ) SetLevel ( logger , level string ) error { lvl , err := logrus . ParseLevel ( level )
if err != nil { return err
}
if logger == "default" { lr . defaultLevel = lvl
return nil
}
lr . logLevels [ logger ] = lvl
logVal := lr . getLoggerFromMapping ( logger )
if logVal != nil { defaultLogger . Debugf ( "setting logger level: %v -> %v" , logVal . GetName ( ) , lvl . String ( ) )
return setLevel ( logVal , lvl )
}
return nil
}
|
SetLevel modifies log level of selected logger in the registry
|
55
|
func ( lr * logRegistry ) GetLevel ( logger string ) ( string , error ) { logVal := lr . getLoggerFromMapping ( logger )
if logVal == nil { return "" , fmt . Errorf ( "logger %s not found" , logger )
}
return logVal . GetLevel ( ) . String ( ) , nil
}
|
GetLevel returns the currently set log level of the logger
|
56
|
func ( lr * logRegistry ) Lookup ( loggerName string ) ( logger logging . Logger , found bool ) { loggerInt , found := lr . loggers . Load ( loggerName )
if ! found { return nil , false
}
logger , ok := loggerInt . ( * Logger )
if ok { return logger , found
}
panic ( fmt . Errorf ( "cannot cast log value to Logger obj" ) )
}
|
Lookup returns a logger instance identified by name from registry
|
57
|
func ( lr * logRegistry ) ClearRegistry ( ) { var wasErr error
lr . loggers . Range ( func ( k , v interface { } ) bool { key , ok := k . ( string )
if ! ok { wasErr = fmt . Errorf ( "cannot cast log map key to string" )
return false
}
if key != DefaultLoggerName { lr . loggers . Delete ( key )
}
return true
} )
if wasErr != nil { panic ( wasErr )
}
}
|
ClearRegistry removes all loggers except the default one from registry
|
58
|
func ( lr * logRegistry ) putLoggerToMapping ( logger * Logger ) { lr . loggers . Store ( logger . name , logger )
}
|
putLoggerToMapping writes logger into map of named loggers
|
59
|
func ( lr * logRegistry ) getLoggerFromMapping ( logger string ) * Logger { loggerVal , found := lr . loggers . Load ( logger )
if ! found { return nil
}
log , ok := loggerVal . ( * Logger )
if ok { return log
}
panic ( "cannot cast log value to Logger obj" )
}
|
getLoggerFromMapping returns a logger by its name
|
60
|
func ( lr * logRegistry ) AddHook ( hook logrus . Hook ) { defaultLogger . Infof ( "adding hook %q to registry" , hook )
lr . hooks = append ( lr . hooks , hook )
lgs := lr . ListLoggers ( )
for lg := range lgs { logger , found := lr . Lookup ( lg )
if found { logger . AddHook ( hook )
}
}
}
|
HookConfigs stores hook configs provided by log manager and applies hook to existing loggers
|
61
|
func ( p * Plugin ) Init ( ) error { var err error
p . config , err = p . getFileDBConfig ( )
if err != nil || p . disabled { return err
}
decoders := [ ] decoder . API { decoder . NewJSONDecoder ( ) , decoder . NewYAMLDecoder ( ) }
if p . client , err = NewClient ( p . config . ConfigPaths , p . config . StatusPath , decoders , filesystem . NewFsHandler ( ) , p . Log ) ; err != nil { return err
}
p . protoWrapper = kvproto . NewProtoWrapper ( p . client , & keyval . SerializerJSON { } )
return nil
}
|
Init reads file config and creates new client to communicate with file system
|
62
|
func ( p * Plugin ) AfterInit ( ) error { if ! p . disabled { p . client . eventWatcher ( )
}
return nil
}
|
AfterInit starts file system event watcher
|
63
|
func NewChange ( key string , value proto . Message , rev int64 , changeType datasync . Op ) * Change { return & Change { changeType : changeType , KeyVal : & KeyVal { key , & lazyProto { value } , rev } , }
}
|
NewChange creates a new instance of Change.
|
64
|
func NewChangeBytes ( key string , value [ ] byte , rev int64 , changeType datasync . Op ) * Change { return & Change { changeType : changeType , KeyVal : & KeyValBytes { key , value , rev } , }
}
|
NewChangeBytes creates a new instance of NewChangeBytes.
|
65
|
func ( plugin * ExamplePlugin ) Init ( ) ( err error ) { plugin . kafkaSyncPublisher , err = plugin . Kafka . NewSyncPublisherToPartition ( connection , topic1 , syncMessagePartition )
if err != nil { return err
}
plugin . subscription = make ( chan messaging . ProtoMessage )
plugin . Log . Info ( "Initialization of the custom plugin for the Kafka example is completed" )
go plugin . producer ( )
go plugin . closeExample ( )
return err
}
|
Init initializes and starts producers
|
66
|
func ( p * Plugin ) Init ( ) ( err error ) { redisCfg , err := p . getRedisConfig ( )
if err != nil || p . disabled { return err
}
client , err := ConfigToClient ( redisCfg )
if err != nil { return err
}
p . connection , err = NewBytesConnection ( client , p . Log )
if err != nil { return err
}
p . protoWrapper = kvproto . NewProtoWrapper ( p . connection , & keyval . SerializerJSON { } )
return nil
}
|
Init retrieves redis configuration and establishes a new connection with the redis data store. If the configuration file doesn't exist or cannot be read, the returned error will be of os.PathError type. An untyped error is returned in case the file doesn't contain a valid YAML configuration.
|
67
|
func ( p * Plugin ) AfterInit ( ) error { if p . StatusCheck != nil && ! p . disabled { p . StatusCheck . Register ( p . PluginName , func ( ) ( statuscheck . PluginState , error ) { _ , _ , err := p . NewBroker ( "/" ) . GetValue ( healthCheckProbeKey , nil )
if err == nil { return statuscheck . OK , nil
}
return statuscheck . Error , err
} )
p . Log . Infof ( "Status check for %s was started" , p . PluginName )
}
return nil
}
|
AfterInit registers redis to status check if required
|
68
|
func ( p * Plugin ) Init ( ) ( err error ) { if p . Config == nil { p . Config , err = p . getConfig ( )
if err != nil || p . disabled { return err
}
}
clientCfg , err := ConfigToClient ( p . Config )
if err != nil { return err
}
p . client , err = NewClient ( clientCfg )
if err != nil { p . Log . Errorf ( "Err: %v" , err )
return err
}
p . reconnectResync = p . Config . ReconnectResync
p . protoWrapper = kvproto . NewProtoWrapper ( p . client , & keyval . SerializerJSON { } )
if p . StatusCheck != nil { p . StatusCheck . Register ( p . PluginName , p . statusCheckProbe )
} else { p . Log . Warnf ( "Unable to start status check for consul" )
}
return nil
}
|
Init initializes Consul plugin.
|
69
|
func ConfigToClient ( cfg * Config ) ( * api . Config , error ) { clientCfg := api . DefaultConfig ( )
if cfg . Address != "" { clientCfg . Address = cfg . Address
}
return clientCfg , nil
}
|
ConfigToClient transforms Config into api.Config which is ready for use with underlying consul package.
|
70
|
func ToChan ( ch chan NamedMappingGenericEvent , opts ... interface { } ) func ( dto NamedMappingGenericEvent ) { timeout := DefaultNotifTimeout
var logger logging . Logger = logrus . DefaultLogger ( )
return func ( dto NamedMappingGenericEvent ) { select { case ch <- dto : case <- time . After ( timeout ) : logger . Warn ( "Unable to deliver notification" )
}
}
}
|
ToChan creates a callback that can be passed to the Watch function in order to receive notifications through a channel. If the notification cannot be delivered before the timeout, it is dropped.
|
71
|
func NewHelloWorld ( ) * HelloWorld { p := new ( HelloWorld )
p . SetName ( "helloworld" )
p . Setup ( )
return p
}
|
NewHelloWorld is a constructor for our HelloWorld plugin.
|
72
|
func NewMultiplexer ( consumerFactory ConsumerFactory , producers multiplexerProducers , clientCfg * client . Config , name string , log logging . Logger ) * Multiplexer { if clientCfg . Logger == nil { clientCfg . Logger = log
}
cl := & Multiplexer { consumerFactory : consumerFactory , Logger : log , name : name , mapping : [ ] * consumerSubscription { } , multiplexerProducers : producers , config : clientCfg , }
go cl . watchAsyncProducerChannels ( )
if producers . manAsyncProducer != nil && producers . manAsyncProducer . Config != nil { go cl . watchManualAsyncProducerChannels ( )
}
return cl
}
|
NewMultiplexer creates new instance of Kafka Multiplexer
|
73
|
func ( mux * Multiplexer ) Start ( ) error { mux . rwlock . Lock ( )
defer mux . rwlock . Unlock ( )
var err error
if mux . started { return fmt . Errorf ( "multiplexer has been started already" )
}
mux . started = true
var hashTopics , manTopics [ ] string
for _ , subscription := range mux . mapping { if subscription . manual { manTopics = append ( manTopics , subscription . topic )
continue
}
hashTopics = append ( hashTopics , subscription . topic )
}
mux . config . SetRecvMessageChan ( make ( chan * client . ConsumerMessage ) )
mux . config . GroupID = mux . name
mux . config . SetInitialOffset ( sarama . OffsetOldest )
mux . config . Topics = append ( hashTopics , manTopics ... )
mux . WithFields ( logging . Fields { "hashTopics" : hashTopics , "manualTopics" : manTopics } ) . Debugf ( "Consuming started" )
mux . Consumer , err = client . NewConsumer ( mux . config , nil )
if err != nil { return err
}
if len ( hashTopics ) == 0 { mux . Debug ( "No topics for hash partitioner" )
} else { mux . WithFields ( logging . Fields { "topics" : hashTopics } ) . Debugf ( "Consuming (hash) started" )
mux . Consumer . StartConsumerHandlers ( )
}
if len ( manTopics ) == 0 { mux . Debug ( "No topics for manual partitioner" )
} else { mux . WithFields ( logging . Fields { "topics" : manTopics } ) . Debugf ( "Consuming (manual) started" )
for _ , sub := range mux . mapping { if sub . manual { sConsumer := mux . Consumer . SConsumer
if sConsumer == nil { return fmt . Errorf ( "consumer for manual partition is not available" )
}
partitionConsumer , err := sConsumer . ConsumePartition ( sub . topic , sub . partition , sub . offset )
if err != nil { return err
}
sub . partitionConsumer = & partitionConsumer
mux . Logger . WithFields ( logging . Fields { "topic" : sub . topic , "partition" : sub . partition , "offset" : sub . offset } ) . Info ( "Partition sConsumer started" )
mux . Consumer . StartConsumerManualHandlers ( partitionConsumer )
}
}
}
go mux . genericConsumer ( )
go mux . manualConsumer ( mux . Consumer )
return err
}
|
Start should be called once all the Connections have been subscribed for topic consumption. An attempt to start consuming a topic after the multiplexer is started returns an error.
|
74
|
func ( mux * Multiplexer ) Close ( ) { safeclose . Close ( mux . Consumer , mux . hashSyncProducer , mux . hashAsyncProducer , mux . manSyncProducer , mux . manAsyncProducer )
}
|
Close cleans up the resources used by the Multiplexer
|
75
|
func ( mux * Multiplexer ) NewBytesConnection ( name string ) * BytesConnectionStr { return & BytesConnectionStr { BytesConnectionFields { multiplexer : mux , name : name } }
}
|
NewBytesConnection creates instance of the BytesConnectionStr that provides access to shared Multiplexer's clients with hash partitioner.
|
76
|
func ( mux * Multiplexer ) NewBytesManualConnection ( name string ) * BytesManualConnectionStr { return & BytesManualConnectionStr { BytesConnectionFields { multiplexer : mux , name : name } }
}
|
NewBytesManualConnection creates instance of the BytesManualConnectionStr that provides access to shared Multiplexer's clients with manual partitioner.
|
77
|
func ( mux * Multiplexer ) NewProtoConnection ( name string , serializer keyval . Serializer ) * ProtoConnection { return & ProtoConnection { ProtoConnectionFields { multiplexer : mux , serializer : serializer , name : name } }
}
|
NewProtoConnection creates instance of the ProtoConnection that provides access to shared Multiplexer's clients with hash partitioner.
|
78
|
func ( mux * Multiplexer ) NewProtoManualConnection ( name string , serializer keyval . Serializer ) * ProtoManualConnection { return & ProtoManualConnection { ProtoConnectionFields { multiplexer : mux , serializer : serializer , name : name } }
}
|
NewProtoManualConnection creates instance of the ProtoConnectionFields that provides access to shared Multiplexer's clients with manual partitioner.
|
79
|
func ( mux * Multiplexer ) propagateMessage ( msg * client . ConsumerMessage ) { mux . rwlock . RLock ( )
defer mux . rwlock . RUnlock ( )
if msg == nil { return
}
for _ , subscription := range mux . mapping { if msg . Topic == subscription . topic { if subscription . manual { if msg . Partition == subscription . partition && msg . Offset >= subscription . offset { mux . Debug ( "offset " , msg . Offset , string ( msg . Value ) , string ( msg . Key ) , msg . Partition )
subscription . byteConsMsg ( msg )
}
} else { mux . Debug ( "offset " , msg . Offset , string ( msg . Value ) , string ( msg . Key ) , msg . Partition )
subscription . byteConsMsg ( msg )
}
}
}
}
|
Propagates incoming messages to respective channels.
|
80
|
func ( mux * Multiplexer ) genericConsumer ( ) { mux . Debug ( "Generic Consumer started" )
for { select { case <- mux . Consumer . GetCloseChannel ( ) : mux . Debug ( "Closing Consumer" )
return
case msg := <- mux . Consumer . Config . RecvMessageChan : mux . propagateMessage ( msg )
case err := <- mux . Consumer . Config . RecvErrorChan : mux . Error ( "Received partitionConsumer error " , err )
}
}
}
|
genericConsumer handles incoming messages to the multiplexer and distributes them among the subscribers.
|
81
|
func ( mux * Multiplexer ) stopConsuming ( topic string , name string ) error { mux . rwlock . Lock ( )
defer mux . rwlock . Unlock ( )
var wasError error
var topicFound bool
for index , subs := range mux . mapping { if ! subs . manual && subs . topic == topic && subs . connectionName == name { topicFound = true
mux . mapping = append ( mux . mapping [ : index ] , mux . mapping [ index + 1 : ] ... )
}
}
if ! topicFound { wasError = fmt . Errorf ( "topic %s was not consumed by '%s'" , topic , name )
}
return wasError
}
|
Remove consumer subscription on given topic. If there is no such subscription, an error is returned.
|
82
|
func ( plugin * ExamplePlugin ) asyncEventHandler ( ) { plugin . Log . Info ( "Started Kafka async event handler..." )
msgCounter := 0
asyncMsgSucc := 0
if messageCountNum == 0 { plugin . asyncSuccess = true
plugin . asyncRecv = true
}
for { select { case message := <- plugin . asyncSubscription : plugin . Log . Infof ( "Received async Kafka Message, topic '%s', partition '%v', offset '%v', key: '%s', " , message . GetTopic ( ) , message . GetPartition ( ) , message . GetOffset ( ) , message . GetKey ( ) )
plugin . kafkaWatcher . MarkOffset ( message , "" )
msgCounter ++
if msgCounter == messageCountNum { plugin . asyncRecv = true
}
case message := <- plugin . asyncSuccessChannel : plugin . Log . Infof ( "Async message successfully delivered, topic '%s', partition '%v', offset '%v', key: '%s', " , message . GetTopic ( ) , message . GetPartition ( ) , message . GetOffset ( ) , message . GetKey ( ) )
asyncMsgSucc ++
if asyncMsgSucc == messageCountNum { plugin . asyncSuccess = true
}
case err := <- plugin . asyncErrorChannel : plugin . Log . Errorf ( "Failed to publish async message, %v" , err )
}
}
}
|
asyncEventHandler is a Kafka consumer asynchronously processing events from a channel associated with a specific topic. If a producer sends a message matching this destination criteria, the consumer will receive it.
|
83
|
func UseAuthenticator ( a BasicHTTPAuthenticator ) Option { return func ( p * Plugin ) { p . Deps . Authenticator = a
}
}
|
UseAuthenticator returns an Option which sets HTTP Authenticator.
|
84
|
func ( p * Plugin ) AfterInit ( ) error { if p . StatusCheck != nil && ! p . disabled { p . StatusCheck . Register ( p . PluginName , p . statusCheckProbe )
p . Log . Infof ( "Status check for %s was started" , p . PluginName )
}
return nil
}
|
AfterInit registers ETCD plugin to status check if needed
|
85
|
func ( p * Plugin ) OnConnect ( callback func ( ) error ) { p . Lock ( )
defer p . Unlock ( )
if p . connected { if err := callback ( ) ; err != nil { p . Log . Errorf ( "callback for OnConnect failed: %v" , err )
}
} else { p . onConnection = append ( p . onConnection , callback )
}
}
|
OnConnect executes callback if plugin is connected, or gathers functions from all plugins with ETCD as dependency
|
86
|
func ( p * Plugin ) Compact ( rev ... int64 ) ( toRev int64 , err error ) { if p . connection != nil { return p . connection . Compact ( rev ... )
}
return 0 , fmt . Errorf ( "connection is not established" )
}
|
Compact compacts the ETCD database to the specific revision
|
87
|
func ( p * Plugin ) etcdReconnectionLoop ( clientCfg * ClientConfig ) { var err error
interval := p . config . ReconnectInterval
if interval == 0 { interval = defaultReconnectInterval
}
p . Log . Infof ( "ETCD server %s not reachable in init phase. Agent will continue to try to connect every %d second(s)" , p . config . Endpoints , interval )
for { time . Sleep ( interval )
p . Log . Infof ( "Connecting to ETCD %v ..." , p . config . Endpoints )
p . connection , err = NewEtcdConnectionWithBytes ( * clientCfg , p . Log )
if err != nil { continue
}
p . setupPostInitConnection ( )
return
}
}
|
Method starts a loop which attempts to connect to the ETCD. If successful, it sends a signal callback with resync, which will be started when datasync confirms successful registration
|
88
|
func ( p * Plugin ) configureConnection ( ) { if p . config . AutoCompact > 0 { if p . config . AutoCompact < time . Duration ( time . Minute * 60 ) { p . Log . Warnf ( "Auto compact option for ETCD is set to less than 60 minutes!" )
}
p . startPeriodicAutoCompact ( p . config . AutoCompact )
}
p . protoWrapper = kvproto . NewProtoWrapper ( p . connection , & keyval . SerializerJSON { } )
}
|
If ETCD is connected, completes all other procedures
|
89
|
func ( p * Plugin ) statusCheckProbe ( ) ( statuscheck . PluginState , error ) { if p . connection == nil { p . connected = false
return statuscheck . Error , fmt . Errorf ( "no ETCD connection available" )
}
if _ , _ , _ , err := p . connection . GetValue ( healthCheckProbeKey ) ; err != nil { p . lastConnErr = err
p . connected = false
return statuscheck . Error , err
}
if p . config . ReconnectResync && p . lastConnErr != nil { if p . Resync != nil { p . Resync . DoResync ( )
p . lastConnErr = nil
} else { p . Log . Warn ( "Expected resync after ETCD reconnect could not start beacuse of missing Resync plugin" )
}
}
p . connected = true
return statuscheck . OK , nil
}
|
ETCD status check probe function
|
90
|
func GetAsyncProducerMock ( t mocks . ErrorReporter ) ( * AsyncProducer , * mocks . AsyncProducer ) { saramaCfg := sarama . NewConfig ( )
saramaCfg . Producer . Return . Successes = true
mock := mocks . NewAsyncProducer ( t , saramaCfg )
cfg := NewConfig ( logrus . DefaultLogger ( ) )
cfg . SetSendSuccess ( true )
cfg . SetSuccessChan ( make ( chan * ProducerMessage , 1 ) )
ap := AsyncProducer { Logger : logrus . DefaultLogger ( ) , Config : cfg , Producer : mock , closeChannel : make ( chan struct { } ) , Client : & saramaClientMock { } }
go ap . successHandler ( mock . Successes ( ) )
return & ap , mock
}
|
GetAsyncProducerMock returns mocked implementation of async producer that doesn't need connection to Kafka broker and can be used for testing purposes.
|
91
|
func GetSyncProducerMock ( t mocks . ErrorReporter ) ( * SyncProducer , * mocks . SyncProducer ) { saramaCfg := sarama . NewConfig ( )
saramaCfg . Producer . Return . Successes = true
mock := mocks . NewSyncProducer ( t , saramaCfg )
cfg := NewConfig ( logrus . DefaultLogger ( ) )
ap := SyncProducer { Logger : logrus . DefaultLogger ( ) , Config : cfg , Producer : mock , closeChannel : make ( chan struct { } ) , Client : & saramaClientMock { } }
return & ap , mock
}
|
GetSyncProducerMock returns mocked implementation of sync producer that doesn't need connection to Kafka broker and can be used for testing purposes.
|
92
|
func GetConsumerMock ( t mocks . ErrorReporter ) * Consumer { cfg := NewConfig ( logrus . DefaultLogger ( ) )
ap := Consumer { Logger : logrus . DefaultLogger ( ) , Config : cfg , Consumer : newClusterConsumerMock ( t ) , closeChannel : make ( chan struct { } ) , }
return & ap
}
|
GetConsumerMock returns mocked implementation of consumer that doesn't need connection to kafka cluster.
|
93
|
func ( p * Plugin ) AfterInit ( ) error { if ! p . Messaging . Disabled ( ) { cfg := p . Config
p . Cfg . LoadValue ( & cfg )
if cfg . Topic != "" { var err error
p . adapter , err = p . Messaging . NewSyncPublisher ( "msgsync-connection" , cfg . Topic )
if err != nil { return err
}
}
}
return nil
}
|
AfterInit uses provided MUX connection to build new publisher.
|
94
|
func ( p * Process ) deleteProcess ( ) error { if p . command == nil || p . command . Process == nil { return nil
}
if p . cancelChan != nil { close ( p . cancelChan )
}
p . log . Debugf ( "Process %s deleted" , p . name )
return nil
}
|
Delete stops the process and internal watcher
|
95
|
func ( p * Process ) watch ( ) { if p . isWatched { p . log . Warnf ( "Process watcher already running" )
return
}
p . log . Debugf ( "Process %s watcher started" , p . name )
p . isWatched = true
ticker := time . NewTicker ( 1 * time . Second )
var last status . ProcessStatus
var numRestarts int32
var autoTerm bool
if p . options != nil { numRestarts = p . options . restart
autoTerm = p . options . autoTerm
}
for { select { case <- ticker . C : var current status . ProcessStatus
if current == status . Initial { continue
}
if ! p . isAlive ( ) { current = status . Terminated
} else { pStatus , err := p . GetStatus ( p . GetPid ( ) )
if err != nil { p . log . Warn ( err )
}
if pStatus . State == "" { current = status . Unavailable
} else { current = pStatus . State
}
}
if current != last { if p . GetNotificationChan ( ) != nil { p . options . notifyChan <- current
}
if current == status . Terminated { if numRestarts > 0 || numRestarts == infiniteRestarts { go func ( ) { var err error
if p . command , err = p . startProcess ( ) ; err != nil { p . log . Error ( "attempt to restart process %s failed: %v" , p . name , err )
}
} ( )
numRestarts --
} else { p . log . Debugf ( "no more attempts to restart process %s" , p . name )
}
}
if current == status . Zombie && autoTerm { p . log . Debugf ( "Terminating zombie process %d" , p . GetPid ( ) )
if _ , err := p . Wait ( ) ; err != nil { p . log . Warnf ( "failed to terminate dead process: %s" , p . GetPid ( ) , err )
}
}
}
last = current
case <- p . cancelChan : ticker . Stop ( )
p . closeNotifyChan ( )
return
}
}
}
|
Periodically tries to ping the process. If the process is unresponsive, marks it as terminated; otherwise the process status is updated. If the process status was changed, a notification is sent. In addition, terminated processes are restarted if allowed by policy, and dead processes are cleaned up.
|
96
|
func ( plugin * ExamplePlugin ) checkStatus ( closeCh chan struct { } ) { for { select { case <- closeCh : plugin . Log . Info ( "Closing" )
return
case <- time . After ( 1 * time . Second ) : status := plugin . StatusMonitor . GetAllPluginStatus ( )
for k , v := range status { plugin . Log . Infof ( "Status[%v] = %v" , k , v )
}
}
}
}
|
checkStatus periodically prints status of plugins that publish their state to status check plugin
|
97
|
func ( p * Plugin ) addHook ( hookName string , hookConfig HookConfig ) error { var lgHook logrus . Hook
var err error
switch hookName { case HookSysLog : address := hookConfig . Address
if hookConfig . Address != "" { address = address + ":" + strconv . Itoa ( hookConfig . Port )
}
lgHook , err = lgSyslog . NewSyslogHook ( hookConfig . Protocol , address , syslog . LOG_INFO , p . ServiceLabel . GetAgentLabel ( ) , )
case HookLogStash : lgHook , err = logrustash . NewHook ( hookConfig . Protocol , hookConfig . Address + ":" + strconv . Itoa ( hookConfig . Port ) , p . ServiceLabel . GetAgentLabel ( ) , )
case HookFluent : lgHook , err = logrus_fluent . NewWithConfig ( logrus_fluent . Config { Host : hookConfig . Address , Port : hookConfig . Port , DefaultTag : p . ServiceLabel . GetAgentLabel ( ) , } )
default : return fmt . Errorf ( "unsupported hook: %q" , hookName )
}
if err != nil { return fmt . Errorf ( "creating hook for %v failed: %v" , hookName , err )
}
cHook := & commonHook { Hook : lgHook }
if len ( hookConfig . Levels ) == 0 { cHook . levels = [ ] logrus . Level { logrus . PanicLevel , logrus . FatalLevel , logrus . ErrorLevel }
} else { for _ , level := range hookConfig . Levels { if lgl , err := logrus . ParseLevel ( level ) ; err == nil { cHook . levels = append ( cHook . levels , lgl )
} else { p . Log . Warnf ( "cannot parse hook log level %v : %v" , level , err . Error ( ) )
}
}
}
p . LogRegistry . AddHook ( cHook )
return nil
}
|
Stores hook into registry for later use and applies it to existing loggers
|
98
|
func ( level LogLevel ) String ( ) string { switch level { case PanicLevel : return "panic"
case FatalLevel : return "fatal"
case ErrorLevel : return "error"
case WarnLevel : return "warn"
case InfoLevel : return "info"
case DebugLevel : return "debug"
default : return fmt . Sprintf ( "unknown(%d)" , level )
}
}
|
String converts the LogLevel to a string. E.g. PanicLevel becomes panic.
|
99
|
func ParseLogLevel ( level string ) LogLevel { switch strings . ToLower ( level ) { case "debug" : return DebugLevel
case "info" : return InfoLevel
case "warn" , "warning" : return WarnLevel
case "error" : return ErrorLevel
case "fatal" : return FatalLevel
case "panic" : return PanicLevel
default : return InfoLevel
}
}
|
ParseLogLevel parses string representation of LogLevel.
|
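
Each `question` cell above stores a Go function in whitespace-tokenized form, and the matching `target` cell holds its original doc comment; the column names suggest a code-to-comment task (given the tokenized function, predict the comment). As a rough, hand-made illustration that is not part of the dataset itself, the pair from row 0 de-tokenizes into ordinary Go source along these lines. The `Mock` struct and `getReturnValues` helper below are simplified stand-ins for definitions assumed to exist in the package the sample was extracted from.

```go
package mockexample

// Mock is a minimal stand-in for the mock type the row 0 sample belongs to;
// its real definition is not included in the dataset.
type Mock struct {
	returnValues map[string][]interface{}
}

// getReturnValues is an assumed helper returning the canned values registered
// for a mocked method name.
func (mock *Mock) getReturnValues(name string) []interface{} {
	return mock.returnValues[name]
}

// WriteFile mocks original method
// (the row's `target` string restored as the doc comment)
func (mock *Mock) WriteFile(file string, data []byte) error {
	items := mock.getReturnValues("WriteFile")
	return items[0].(error)
}
```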