sails logging to file

Can someone provide an example of how to configure sails.js to log to a file?
It seems like it should be straightforward, but I'm having trouble finding examples online.
I'm looking at changes in the config/log.js or the config/sockets.js files.

According to the source code, for v0.9.x, you just have to set the filePath in your config/log.js:
module.exports = {
  log: {
    level: 'info',
    filePath: 'application.log'
  }
};

Logging to a file doesn't work out of the box; you have to reach two levels down into the libraries Sails uses and configure winston yourself (see the winston documentation).
First, install winston:
$ npm install winston
Then adjust config/log.js to look as follows:
var winston = require('winston');

// See the winston documentation: https://github.com/flatiron/winston
var logger = new (winston.Logger)({
  transports: [
    new (winston.transports.Console)({}),
    new (winston.transports.File)({
      filename: 'logfile.log',
      level: 'verbose',
      json: false,
      colorize: false
    })
  ]
});

module.exports.log = {

  /***************************************************************************
   * Valid `level` configs: i.e. the minimum log level to capture with      *
   * sails.log.*()                                                           *
   *                                                                         *
   * The order of precedence for log levels from lowest to highest is:      *
   * silly, verbose, info, debug, warn, error                                *
   *                                                                         *
   * You may also set the level to "silent" to suppress all logs.           *
   ***************************************************************************/

  level: 'silly',
  colorize: false,
  custom: logger
};
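With that config in place, the usual sails.log calls should flow through the custom winston logger. A quick sanity check (a sketch; the messages are made up, and the Console transport keeps winston's default 'info' level since none was set):

// e.g. in config/bootstrap.js or any controller action
sails.log.info('server started');    // console + logfile.log
sails.log.verbose('detailed trace'); // logfile.log only (below the Console transport's level)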

For winston 3.x, @djsadinoff's answer no longer works. Instead, do:
$ npm install winston
and replace your config/log.js file with the following code:
var winston = require('winston');

const logger = winston.createLogger({
  level: 'silly',
  format: winston.format.json(),
  transports: [
    //
    // - Write all logs with level `error` to `error.log`.
    // - Write all logs to `sails.log`.
    //
    new winston.transports.File({ filename: 'error.log', level: 'error' }),
    new winston.transports.File({ filename: 'sails.log' })
  ]
});

//
// If we're not in production then also log to the `console` with the format:
// `${info.level}: ${info.message} JSON.stringify({ ...rest })`
//
if (process.env.NODE_ENV !== 'production') {
  logger.add(new winston.transports.Console({
    format: winston.format.simple()
  }));
}
module.exports.log = {

  /***************************************************************************
   * Valid `level` configs: i.e. the minimum log level to capture with      *
   * sails.log.*()                                                           *
   *                                                                         *
   * The order of precedence for log levels from lowest to highest is:      *
   * silly, verbose, info, debug, warn, error                                *
   *                                                                         *
   * You may also set the level to "silent" to suppress all logs.           *
   ***************************************************************************/

  // Pass in our custom logger, and pass all log levels through.
  custom: logger,
  level: 'silly',

  // Disable captain's log so it doesn't prefix or stringify our metadata.
  inspect: false
};
Then do
$ sails lift
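Once lifted, the standard sails.log calls should be routed through the custom winston logger above. A rough sanity check (the messages are just examples):

// e.g. in config/bootstrap.js or any action
sails.log.info('info message');    // -> sails.log (plus the console outside production)
sails.log.error('error message');  // -> sails.log and error.log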

Related

Gatling Test in Blazemeter creates ClassNotFoundException

I used the Taurus Gatling guide to create a simple performance test and uploaded the YAML and Scala file to BlazeMeter. When I start the test in BlazeMeter there is no test result and the bzt.log contains a ClassNotFoundException.
The validator for the YAML says it's fine and I can't find anything else, so I'm lost.
My blazemeter.yaml:
execution:
- executor: gatling
  scenario: products
  iterations: 15
  concurrency: 3
  ramp-up: 2

scenarios:
  products:
    script: productSimulation.scala
    simulation: test.productSimulation
My productSimulation.scala is mostly copied from their documentation:
package test

import io.gatling.core.Predef._
import io.gatling.http.Predef._

class productSimulation extends Simulation {

  // parse load profile from Taurus
  val t_iterations = Integer.getInteger("iterations", 100).toInt
  val t_concurrency = Integer.getInteger("concurrency", 10).toInt
  val t_rampUp = Integer.getInteger("ramp-up", 1).toInt
  val t_holdFor = Integer.getInteger("hold-for", 60).toInt
  val t_throughput = Integer.getInteger("throughput", 100).toInt

  val httpConf = http.baseURL("https://mydomain/")

  val header = Map(
    "Content-Type" -> """application/x-www-form-urlencoded""")

  val sessionHeaders = Map(
    "Authorization" -> "Bearer ${access_token}",
    "Content-Type" -> "application/json")

  // 'forever' means each thread will execute scenario until
  // duration limit is reached
  val loopScenario = scenario("products").forever() {
    // auth
    exec(http("POST OAuth Req")
      .post("https://oauth-provider")
      .formParam("client_secret", "...")
      .formParam("client_id", "...")
      .formParam("grant_type", "client_credentials")
      .headers(header)
      .check(status.is(200))
      .check(jsonPath("$.access_token").exists
        .saveAs("access_token")))
    // read products
    .exec(http("products")
      .get("/products")
      .queryParam("limit", 200)
      .headers(sessionHeaders))
  }

  val execution = loopScenario
    .inject(rampUsers(concurrency) over rampUp) // 'during' for gatling 3.x
    .protocols(httpConf)

  setUp(execution).maxDuration(rampUp + holdFor)
}
After learning that I can execute the Scala file as a test directly (by selecting the file itself instead of the YAML), I got better exceptions.
Basically I had made two mistakes:
My variables are named t_concurrency, ... while the injection code uses different names. Oops.
Since Gatling 3.x the keyword for the inject is during, so the correct code is: rampUsers(t_concurrency) during t_rampUp
Now everything works.
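Put together, the corrected injection and setUp lines look roughly like this (a sketch based on the two fixes above):

  val execution = loopScenario
    .inject(rampUsers(t_concurrency) during t_rampUp) // Gatling 3.x: 'during' instead of 'over'
    .protocols(httpConf)

  setUp(execution).maxDuration(t_rampUp + t_holdFor)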

Jenkins 'Pipeline script from SCM' polls infinitely

My Jenkins pipeline workflow for building project is as below.
There are two files checked into the repository -- JenkinsfileAllBranches and Jenkinsfile
1) JenkinsfileAllBranches - Polls all branches for changes
def scm_branch = ''

pipeline {
    agent any
    triggers {
        pollSCM('* * * * *') // used this for quick debugging
    }
    stages {
        stage('SCM') {
            steps {
                script {
                    def git_scm = checkout([$class: 'GitSCM', branches: [[name: '**']],
                        doGenerateSubmoduleConfigurations: false, extensions: [],
                        submoduleCfg: [], userRemoteConfigs: [[url: <repository_url>]]])
                    scm_branch = git_scm.GIT_BRANCH.substring('origin\\'.length())
                }
            }
        }
        stage('Call Jenkinsfile for specific branch') {
            steps {
                print("branch:${scm_branch}")
                build job: 'Build_Project', parameters:
                    [[$class: 'StringParameterValue', name: 'BRANCH', value: scm_branch]]
            }
        }
    }
}
2) Jenkinsfile - For ease, I am providing the simplified Jenkinsfile
pipeline {
    agent any
    options {
        disableConcurrentBuilds()
    }
    parameters {
        string(name: 'BRANCH', defaultValue: '', description: 'Enter a branch name to build.')
    }
    stages {
        stage('SCM') {
            steps {
                script {
                    print("Parameter BRANCH: ${params.BRANCH}")
                }
                git url: <repo_url>, branch: params.BRANCH
            }
        }
    }
}
Problem: The first job is expected to poll every minute for changes in any branch, and the second job is expected to build the specific branch where changes were found.
Everything works as expected when I define the pipeline scripts inline in the jobs.
But when I select 'Pipeline script from SCM', JenkinsfileAllBranches behaves weirdly: it keeps polling and building the same branch again and again. How do I resolve this loop?
In the stage 'Call Jenkinsfile for specific branch', I notice that it always executes with the branch as master instead of branch1 or branch2 where the SCM changes were found.

Error in JSON.parse() (when called from API Gateway)

I'm working with AWS Lambda + API Gateway, and I need to pass an array of numbers in the URL (GET method) for a REST call. It seems a good approach is to pass the numbers as a comma-separated string and then use JSON.parse to convert them to an array of numbers.
Following is the AWS Lambda code I'm using:
exports.handler = (event, context, callback) => {
    var arr = JSON.parse('[' + event.numbers + ']');
    console.log("array: " + arr);
    // TODO implement
    callback(null, 'Hello from Lambda');
};
I'm testing this function in AWS Lambda using this input test event:
{
    "numbers": "1,5"
}
And everything works as expected; no errors.
However, when I test it via API Gateway and pass the numbers as a string in the query, I get the following error (observed via CloudWatch):
19:19:02 START RequestId: eabab882-8cee-11e7-8e2f-79d3086e061f Version: $LATEST
19:19:02 2017-08-29T19:19:02.688Z eabab882-8cee-11e7-8e2f-79d3086e061f SyntaxError: Unexpected token u in JSON at position 1 at Object.parse (native) at exports.handler (/var/task/index.js:4:20)
19:19:02 END RequestId: eabab882-8cee-11e7-8e2f-79d3086e061f
19:19:02 REPORT RequestId: eabab882-8cee-11e7-8e2f-79d3086e061f Duration: 215.25 ms Billed Duration: 300 ms Memory Size: 128 MB Max Memory Used: 18 MB
19:19:02 RequestId: eabab882-8cee-11e7-8e2f-79d3086e061f Process exited before completing request
This is the request passed to Lambda as shown in the log:
"body-json": {},
"params": {
    "path": {},
    "querystring": {
        "numbers": "1,6"
    },
    "header": {}
},
"stage-variables": {},
I can't figure out what the problem is, since I'm passing the same string in both cases.
I would appreciate any help.
Thanks
Gus
With the input JSON shown above, you need to read the value like this:
var arr = JSON.parse('[' + event.params.querystring.numbers + ']');
rather than:
var arr = JSON.parse('[' + event.numbers + ']');
Or create a body mapping template so the event keeps the shape your handler expects:
{ "numbers": "$input.params('numbers')" }
http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
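Putting it together, a minimal handler sketch for the integration-request shape shown in the log above (the exact event layout depends on your mapping template, so treat the paths here as an assumption):

exports.handler = (event, context, callback) => {
    // With the request shape logged above, the query string lives under
    // event.params.querystring rather than on the event root.
    var numbers = event.params.querystring.numbers; // e.g. "1,6"
    var arr = JSON.parse('[' + numbers + ']');      // -> [1, 6]
    console.log('array: ' + arr);
    callback(null, 'Parsed ' + arr.length + ' numbers');
};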
I hope I have helped!

FlinkQueryableState: configuration issues on a local cluster

I am running Flink from the IDE. Storing data in the queryable state works, but somehow when I query it, it throws an exception.
Exception:
Failure(akka.actor.ActorNotFound: Actor not found for: ActorSelection[Anchor(akka.tcp://flink@127.0.0.1:6123/), Path(/user/jobmanager)])
My code:
config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY,"localhost")
config.setString(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,"6123")
@throws[Throwable]
def recover(failure: Throwable): Future[Array[Byte]] =
  if (failure.isInstanceOf[AssertionError]) Futures.failed(failure)
  else {
    // At startup some failures are expected
    // due to races. Make sure that they don't
    // fail this test.
    Patterns.after(retryDelay, TEST_ACTOR_SYSTEM.scheduler, TEST_ACTOR_SYSTEM.dispatcher, new Callable[Future[Array[Byte]]]() {
      @throws[Exception]
      def call: Future[Array[Byte]] = getKvStateWithRetries(queryName, key, serializedKey)
    })
  }

@SuppressWarnings(Array("unchecked"))
private def getKvStateWithRetries(queryName: String,
                                  keyHash: Int,
                                  serializedKey: Array[Byte]): Future[Array[Byte]] = {
  val kvState = client.getKvState(jobID, queryName, keyHash, serializedKey)
  kvState.recoverWith(recover(queryName, keyHash, serializedKey))
}

def onSuccess = new OnSuccess[Array[Byte]]() {
  @throws(classOf[Throwable])
  override def onSuccess(result: Array[Byte]): Unit = {
    println("found record ")
    val value = KvStateRequestSerializer.deserializeValue(result, valueSerializer)
    println(value)
  }
}

override def invoke(query: QueryMetaData): Unit = {
  println("getting inside querystore" + query.record)
  val serializedResult = flinkQuery.getResult(query.record, queryName)
  serializedResult.onSuccess(onSuccess)
I am not spawning a new mini-cluster or calling cluster.submit like https://github.com/apache/flink/blob/master/flink-tests/src/test/java/org/apache/flink/test/query/QueryableStateITCase.java does, because I want to do this in the same cluster and environment as the main app running with env.execute. Is that step necessary?
According to the documentation, Flink runs at localhost:6123 by default.
Is there a problem with the connection? Do I need to submit the job to a separate cluster?
After a lot of googling I found a solution.
I was using LocalStreamEnvironment and getting the same error, until I found this thread: RemoteEnv connect failed. The error described there is for a different (non-local) setup, but the gist example used for testing in that topic creates the LocalFlinkMiniCluster with the parameter "useSingleActorSystem" set to false.
Looking at the implementation of LocalStreamEnvironment, the MiniCluster is created with "useSingleActorSystem" set to true.
I simply created a class LocalQueryableStreamEnvironment extending LocalStreamEnvironment where the mini cluster is created with "useSingleActorSystem" set to false, and everything is working from the IDE.
Now my code is as follows:
Configuration:
Configuration config = new Configuration();
config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 6);
config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);
config.setInteger(JobManagerOptions.WEB_PORT, JobManagerOptions.WEB_PORT.defaultValue());
config.setBoolean(QueryableStateOptions.SERVER_ENABLE, true);
config.setString(JobManagerOptions.ADDRESS, "localhost");
config.setInteger(JobManagerOptions.PORT, JobManagerOptions.PORT.defaultValue());
config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
NOTE: queryable state only works with LOCAL_NUMBER_TASK_MANAGER set to a value greater than 1!
Instantiate/execute environment:
LocalQueryableStreamEnvironment env = LocalQueryableStreamEnvironment.createLocalEnvironment(3, config);
...
env.addSource(anySource)
   .keyBy(anyAttribute)
   .flatMap(new UpdateMyStateToBeQueriedLaterMapper())
   .addSink(..); // etc
...
env.execute("JobNameHere");
And to create the client:
final Configuration config = new Configuration();
config.setString(JobManagerOptions.ADDRESS, "localhost");
config.setInteger(JobManagerOptions.PORT, JobManagerOptions.PORT.defaultValue());

HighAvailabilityServices highAvailabilityServices = HighAvailabilityServicesUtils
    .createHighAvailabilityServices(
        config,
        Executors.newSingleThreadScheduledExecutor(),
        HighAvailabilityServicesUtils.AddressResolution.TRY_ADDRESS_RESOLUTION
    );

return new QueryableStateClient(config, highAvailabilityServices);
For more info access:
Queryable States in ApacheFlink - Implementation
Queryable State Client with 1.3.0-rc0
My dependencies:
compile group: 'org.apache.flink', name: 'flink-java', version: '1.3.1'
compile group: 'org.apache.flink', name: 'flink-jdbc', version: '1.3.1'
compile group: 'org.apache.flink', name: 'flink-streaming-java_2.11', version: '1.3.1'
compile group: 'org.apache.flink', name: 'flink-clients_2.11', version: '1.3.1'
compile group: 'org.apache.flink', name: 'flink-cep_2.11', version: '1.3.1'
compile group: 'org.apache.flink', name: 'flink-connector-kafka-0.10_2.11', version: '1.3.1'
compile 'org.apache.flink:flink-runtime-web_2.11:1.3.1'

UserInfo SalesForce

var SalesforceOAuthPlugin = {
    /**
     * Obtain authentication credentials, calling 'authenticate' only if necessary.
     * Most index.html authors can simply use this method to obtain auth credentials
     * after onDeviceReady.
     *   success - The success callback function to use.
     *   fail    - The failure/error callback function to use.
     * PhoneGap returns a dictionary with:
     *   accessToken
     *   refreshToken
     *   clientId
     *   userId
     *   orgId
     *   loginUrl
     *   instanceUrl
     *   userAgent
     */
    getAuthCredentials: function(success, fail) {
        PhoneGap.exec(success, fail, "com.salesforce.oauth", "getAuthCredentials", []);
    }
};

SalesforceOAuthPlugin.getAuthCredentials(getUserid(), getAuthCredentialsError);
I am trying to get the id of the user using the above function, but it's not working.
Use a link of this format: https://na1.salesforce.com/id/1/1
where na1 is the Salesforce instance, the first 1 is the organizationId and the next 1 is the userId.
Note that such a GET must be authenticated (provide the OAuth token).
I think you provide the actual userId in the URL, or there is a mapping between the token and the id.
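For completeness, a small sketch of how the plugin callbacks might be wired (the callback names are made up; the fields come from the dictionary listed in the plugin's own comment above):

function onCreds(creds) {
    // userId is returned directly by the plugin
    console.log('userId: ' + creds.userId);
    // or build the identity URL from the answer above and GET it with
    // the header "Authorization: Bearer " + creds.accessToken
    var idUrl = creds.instanceUrl + '/id/' + creds.orgId + '/' + creds.userId;
    console.log('identity URL: ' + idUrl);
}

function onCredsError(err) {
    console.log('auth failed: ' + err);
}

SalesforceOAuthPlugin.getAuthCredentials(onCreds, onCredsError);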
