Google Analytics optimization with r.js - backbone.js

Hi everyone, I am trying to make r.js work, but I get an error on Google Analytics like this:
Tracing dependencies for: main
Cannot optimize network URL, skipping: //www.google-analytics.com/analytics.js
This is my requirejs.config file:
window.GoogleAnalyticsObject = "__ga__";
window.__ga__ = function() {
    // Stub: run any hitCallback passed in so callers don't wait on the real
    // analytics.js if it hasn't loaded yet (or is blocked).
    for (var i = 0; i < arguments.length; i++) {
        var arg = arguments[i];
        if (arg.constructor == Object && arg.hitCallback) {
            arg.hitCallback();
        }
    }
};
window.__ga__.q = [["create", "UA-82626142-1", "auto"]];
window.__ga__.l = Date.now();

require.config({
    paths: {
        ga: "//www.google-analytics.com/analytics"
    },
    shim: {
        "ga": {
            exports: "__ga__"
        }
    }
});
And this is my r.js config file:
{
    baseUrl: "../js",
    name: "main",
    out: 'app-built.js',
    findNestedDependencies: true,
    paths: {
        ga: "//www.google-analytics.com/analytics",
    },
    include: 'requireLib',
    mainConfigFile: "../js/main.js",
}
I have tried using the 'empty:' path in the r.js config, but it doesn't work. Thanks in advance.

I have checked the RequireJS documentation again and again and found the right way to handle CDN URLs with the r.js optimizer. You need to write this in the optimizer config file:
paths: {
    ....
    ga: 'empty:'
}
We can use 'empty:' for any CDN link; alternatively, it can be passed as a command-line argument when running the optimizer config file, like this (this is my own example from my app):
node build/r.js -o build/build.single.js paths.ga=empty:
Hope this will be useful for others who have the same problem.
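For reference, a minimal build-config sketch with the CDN path stubbed out could look like this; the file names mirror the question, and the rest is an assumed typical setup rather than the exact original file:
// build.single.js (sketch): "empty:" tells r.js not to fetch or inline the
// module; the paths/shim entries in main.js still load it at runtime.
({
    baseUrl: "../js",
    mainConfigFile: "../js/main.js",
    name: "main",
    include: "requireLib",
    findNestedDependencies: true,
    out: "app-built.js",
    paths: {
        ga: "empty:"
    }
})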

Related

Webpack obfuscator not working with craco, maps disabled

Today I have a very large problem using React & craco: I can't seem to get webpack-obfuscator to do anything. I have disabled source maps, but to no avail.
This is my craco config:
const path = require("path");
const WebpackObfuscator = require('webpack-obfuscator');

module.exports = {
  webpack: {
    configure: (webpackConfig) => {
      // Because CEF has issues with loading source maps properly atm,
      // lets use the best we can get in line with `eval-source-map`
      if (webpackConfig.mode === 'development' && process.env.IN_GAME_DEV) {
        webpackConfig.devtool = 'eval-source-map'
        webpackConfig.output.path = path.join(__dirname, 'build')
      }
      return webpackConfig
    },
    plugins: {
      add: [
        new WebpackObfuscator({
          rotateStringArray: true
        }),
      ],
    },
  },
  devServer: (devServerConfig) => {
    if (process.env.IN_GAME_DEV) {
      // Used for in-game dev mode
      devServerConfig.writeToDisk = true
    }
    return devServerConfig
  }
}
I get no visible map files when building, and I've put "GENERATE_SOURCEMAP=false" in my .env file that's located next to package.json.
Hopefully someone has the answer as to why this is happening.
Kind regards, and thanks for reading.
To keep the config short, you can use a construct that applies the setting only when the condition is met, without involving WebpackObfuscator:
module.exports = {
  webpack: {
    configure: {
      ...(process.env.IN_GAME_DEV && process.env.NODE_ENV === 'development' && { devtool: 'eval-source-map' })
    }
  }
}
Also, if you need additional properties in the configuration besides devtool, you can add them there.
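For example, a minimal sketch that only adds the obfuscator for production builds and keeps source maps off; it assumes the standard webpack-obfuscator plugin constructor, and the option shown is just the one from the question:
const WebpackObfuscator = require('webpack-obfuscator');

module.exports = {
  webpack: {
    configure: (webpackConfig) => {
      if (process.env.NODE_ENV === 'production') {
        // Only obfuscate production bundles; development output stays readable.
        webpackConfig.plugins.push(
          new WebpackObfuscator({ rotateStringArray: true })
        );
        // Avoid emitting source maps that would defeat the obfuscation.
        webpackConfig.devtool = false;
      }
      return webpackConfig;
    },
  },
};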

How to split dynamically by directories with Webpack/SplitChunks plugin?

I'm trying to split my React code (created with create-react-app) with the splitChunks plugin in the following way.
I have the following component (JSX) structure:
services
  serviceA
    ComponentA1
    ComponentA2
    subFolder
      ComponentA3
    ...
  serviceB
    ComponentB1
    ComponentB2
    ...
  serviceC
    ComponentB1
    ComponentB2
    ...
  ...
and I want to have the following output (build):
static/js
  serviceA
    serviceA.bundle.chunkhash.js
  serviceB
    serviceB.bundle.chunkhash.js
  serviceC
    serviceC.bundle.chunkhash.js
(other runtimes / mains are at the root of /static/js)
Another restriction is that components are loaded dynamically with
const Component = lazy(() => import(componentPath));
...
<Suspense fallback={..}><Component /></Suspense>
componentPath is determined on the fly (when a user clicks on an icon, it opens the corresponding service).
The reason for this is that I want to include each bundle in a separate Docker image running the backend. Each Docker image is then reachable thanks to application routing:
static/js/serviceA/ ==> js served by Docker container running service A
static/js/serviceB/ ==> js served by Docker container running service B
static/js/serviceC/ ==> js served by Docker container running service C
So far, I've tried to:
set the output.chunkFilename to [name]/[name].[chunkhash].js
use the webpackChunkName magic comment with [name] and [request] (roughly as in the sketch after this list):
[name] doesn't seem to work (I got literally "[name]" as part of my directory name).
[request] flattens the names of the directories:
serviceA-ComponentA1
serviceA-ComponentA2
serviceA-subFolder-ComponentA3
serviceB-componentB1
serviceB-componentB2
...
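For context, the lazy import that produced these names looked roughly like this; the path template is simplified, and only the webpackChunkName placeholders matter here:
// Sketch of the dynamic import with the magic comment.
const Component = lazy(() =>
  import(/* webpackChunkName: "[request]" */ `./services/${componentPath}`)
);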
Then I tried to use the splitChunks plugin with the following:
splitChunks: {
  chunks: 'all',
  name: function(module) {
    let serviceName = module.rawRequest ? module.rawRequest : 'default';
    serviceName = serviceName.replace('../', '').replace('./', '');
    serviceName = serviceName.split('/')[0];
    return serviceName;
  },
  cacheGroups: {
    vendors: {
      test: /[\\/]node_modules[\\/]/,
      priority: -10
    },
    default: {
      minChunks: 2,
      priority: -20,
      reuseExistingChunk: true
    },
    serviceA: {
      test: /[\\/]serviceA[\\/]/,
      priority: -10
    },
    serviceB: {
      test: /[\\/]serviceB[\\/]/,
      priority: -10
    },
    serviceC: {
      test: /[\\/]serviceC[\\/]/,
      priority: -10
    },
  }
},
This approach seems to work, as all my services end up in their own directories. But I still get some additional directories named with numbers (probably bundle IDs) that I would have expected to go into the default group instead.
So the question is: is my approach correct?
I'm not sure if the following option would work for you. I had a similar problem, where I needed different folders to be output to different bundles.
In my case, I started with the glob solution suggested here.
Then, knowing that I needed an array of inputs for each desired output, I came up with this:
const path = require('path');
const glob = require('glob');

const plugins = [...];

module.exports = {
  // Build one entry per folder directly under ./src, collecting all of its files.
  entry: glob.sync('./src/**/*.js').reduce((acc, item) => {
    const parts = item.split('/');
    parts.pop();
    const rootFolder = parts[2] ? `${parts[0]}/${parts[2]}` : parts[0];
    if (acc[rootFolder]) {
      acc[rootFolder].push(item);
    } else {
      acc[rootFolder] = [item];
    }
    return acc;
  }, {}),
  output: {
    filename: '[name]/main.js',
    path: path.resolve(__dirname, 'dist'),
  },
  module: { ... },
  plugins,
};
This is a simplified version of my config and it could probably be improved, but it works fine for my needs. :)
More info on glob library: https://github.com/isaacs/node-glob
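To illustrate, assuming the service folders sit directly under ./src (hypothetical paths), the reduce above builds an entry map along these lines:
// Shape of the generated entry object; with filename '[name]/main.js' each
// group is then emitted under a matching folder inside dist/.
{
  './serviceA': ['./src/serviceA/ComponentA1.js', './src/serviceA/ComponentA2.js'],
  './serviceB': ['./src/serviceB/ComponentB1.js']
}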

webpack separate build files

I have a nested directory structure with jsx modules, like
app/js/header/index.jsx
app/js/task/runner.jsx
and so on
Is it possible to have webpack transpile each one of them and output the result in the same directory as the JSX file?
Regards
If I understand you correctly, you want to put the resulting module next to each source module. It seems that you can achieve this with a plugin:
var fs = require('fs');

function MyPlugin() {}

MyPlugin.prototype.apply = function(compiler) {
  compiler.plugin('emit', function(compilation, callback) {
    compilation.modules.forEach(m => {
      if (/filename/.test(m.resource)) { // test for filename to exclude node_modules
        fs.writeFileSync(m.resource + '.transpiled', m._source._value);
      }
    });
    callback();
  });
};
and in the webpack config:
{
  ...
  plugins: [ new MyPlugin() ],
  ...
}
Is that what you are trying to do?

grunt-msdeploy for deploying AngularJS app to multiple servers

I am using grunt-msdeploy for deploying AngularJS code to one of the servers, and this is working perfectly fine. I would like to deploy the same code to multiple servers. How do I achieve this? Please help!
Gruntfile.js code for msdeploy:
var path = require('path');

// add the config file name
var configFile = path.resolve('./deployconfig.json');
var config = require(configFile);

msdeploy: {
  push: {
    options: {
      verb: 'sync',
      allowUntrusted: 'true',
      source: {
        'contentPath': path.resolve('./dist')
      },
      dest: {
        contentPath: config.contentPath,
        wmsvc: config.serverAddress,
        userName: config.userName,
        password: config.password
      }
    }
  }
}
grunt.loadNpmTasks('grunt-msdeploy');
//add task for deployment - copying the dist from local server to remote server
grunt.registerTask('deploy', ['msdeploy:push']);
deployconfig.json:
{
  "contentPath": "c:/inetpub/wwwroot/dist",
  "serverAddress": "ec2-xx-xx-xx-x.ap-northeast-1.compute.amazonaws.com",
  "userName": "xxxxxxxxx",
  "password": "xxxxxxxxx"
}
I have tried using multiple dest entries in msdeploy, with multiple servers' information in the JSON file, but that didn't work. Is there a way to do this at all?
I think you are configuring the task wrong, which is why it doesn't work in your case; it should be defined as a task for Grunt. Here's the pseudo-code:
grunt.initConfig({
  msdeploy: {
    options: {
      // Task-specific options go here.
    },
    your_target: {
      // Target-specific file lists and/or options go here.
    },
  },
});
More information and options in the official manual.
For multiple destinations, just create several target descriptions:
grunt.initConfig({
  msdeploy: {
    options: {
      // Task-specific options go here.
    },
    your_target_1: {
      // Target-specific file lists and/or options go here.
    },
    your_target_2: {
      // Target-specific file lists and/or options go here.
    },
    ...
  },
});
You may create options generic to all of those targets, as well as specific options for each target.
When you run the task, simply do not specify which target to run, and it will execute them one by one:
// will be executed for all the targets from task `msdeploy` definition
grunt.registerTask('deploy', ['msdeploy']);
// or alternatively you may explicitly define the order of tasks:
grunt.registerTask('deploy', ['msdeploy:your_target_1', 'msdeploy:your_target_2']);
This is the working solution:
msdeploy: {
  target1: {
    options: {
      verb: 'sync',
      allowUntrusted: 'true',
      source: {
        'contentPath': path.resolve('./dist')
      },
      dest: {
        contentPath: "target1path",
        wmsvc: "target1serverAddress",
        userName: "target1userName",
        password: "target1password"
      }
    }
  },
  target2: {
    options: {
      verb: 'sync',
      allowUntrusted: 'true',
      source: {
        'contentPath': path.resolve('./dist')
      },
      dest: {
        contentPath: "target2path",
        wmsvc: "target2serverAddress",
        userName: "target2userName",
        password: "target2password"
      }
    }
  }
}
grunt.registerTask('deploy', ['msdeploy:target1', 'msdeploy:target2']);
In case anyone wants to do it with the config file, add multiple entries to the JSON config file like this:
[
  {
    "contentPath": "c:/inetpub/wwwroot/dist",
    "serverAddress": "ec2-xx-xxx-xx-xxx.ap-northeast-1.compute.amazonaws.com",
    "userName": "xxxxxxxxxx",
    "password": "xxxxxxxxx"
  },
  {
    "contentPath": "c:/inetpub/wwwroot/dist",
    "serverAddress": "ec2-xx-xxx-xx-xxx.ap-northeast-1.compute.amazonaws.com",
    "userName": "xxxxxxxxxx",
    "password": "xxxxxxxxx"
  }
]
and the values can be referenced as:
var path = require('path');
var configFile = path.resolve('./deployconfig.json');
var config = require(configFile);
contentPath: config[0].contentPath,
wmsvc: config[0].serverAddress,
userName: config[0].userName,
password: config[0].password
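Building on that, here is a hedged sketch of how the msdeploy targets could be generated from such a config array instead of hard-coding each one; the target names like server0 are illustrative:
// Sketch: one msdeploy target per entry in deployconfig.json.
var path = require('path');
var config = require(path.resolve('./deployconfig.json'));

var targets = {};
config.forEach(function (server, i) {
  targets['server' + i] = {
    options: {
      verb: 'sync',
      allowUntrusted: 'true',
      source: { contentPath: path.resolve('./dist') },
      dest: {
        contentPath: server.contentPath,
        wmsvc: server.serverAddress,
        userName: server.userName,
        password: server.password
      }
    }
  };
});

grunt.initConfig({ msdeploy: targets });
grunt.loadNpmTasks('grunt-msdeploy');
// Running the bare task name executes every generated target in turn.
grunt.registerTask('deploy', ['msdeploy']);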

Simple gulp task to deploy to google app engine and stream output to console?

Is there a good way to execute the gcloud commands to deploy an app to GAE and see the stderr/stdout echoed back to the console? I've tried gulp-exec, but it seems to batch up the output, dumping it only upon completion. It also won't play nicely when trying to preview locally.
In the end I didn't want to pull in another npm package. Loosely inspired by a portion of gulp-run, I came up with the following. It assumes 'clean' and 'build' tasks exist, and it also overrides some constants per environment for a gulp-replace task that is part of my 'build'. The key is spawning a subshell and piping its output to the current process's:
// gulp deploy [-a dev|staging|prod]
gulp.task('deploy', function() {
  var commands = {
    remote: 'gcloud preview app deploy app.yaml -q --set-default --project ',
    local: 'gcloud preview app run app.yaml'
  };
  var environments = {
    dev: {
      app: 'myapp-dev',
    },
    staging: {
      app: 'myapp-staging',
      MY_ENDPOINT: 'https://staging.example.com'
    },
    prod: {
      app: 'myapp',
      MY_ENDPOINT: 'https://example.com'
    }
  };
  var command = commands.local;
  var env = environments[argv.a];
  if (env) {
    command = commands.remote + env.app;
    constantsMap.MY_ENDPOINT = env.MY_ENDPOINT;
  }
  // Now that our constants are configured, kick off the build, then deploy.
  runSequence('clean', 'build', function() {
    var title = util.format('$ %s\n', $.util.colors.blue(command));
    process.stdout.write(title);
    // run the command in its own subshell and pipe the output to our own.
    var subshell = childProcess.spawn('sh', ['-c', command]);
    subshell.stdout.pipe(process.stdout);
    subshell.stderr.pipe(process.stderr);
  });
});
This relies on the npm packages run-sequence, util, yargs, and gulp-load-plugins.
If you want to execute commands, the following code snippet will help you. I have wrapped it inside a promise since you are using gulp:
var cp = require('child_process');

// Runs `command` with the given args, streams its stdout, and resolves with
// the collected output once the process exits cleanly.
function executeCommand(command, option) {
  return new Promise(function (resolve, reject) {
    var args = [option.something, option.something];
    var ls = cp.spawn(command, args);
    var output = "";

    ls.on('error', function (err) {
      reject(err);
    });

    ls.stdout.on('data', function (data) {
      output += String(data);
      console.log(String(data)); // echo each chunk as it arrives
    });

    ls.on('exit', function (code) {
      if (code === 0) {
        resolve({
          "output": output
        });
      } else {
        reject(Error(output));
      }
    });
  });
}
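A hedged usage sketch inside a gulp task, mainly to show that returning the promise lets gulp know when the command has finished; the command, task name, and option fields are placeholders, not from the original answer:
// Illustrative only: run the helper and report the result to the console.
gulp.task('deploy-cmd', function () {
  return executeCommand('gcloud', { something: 'help' })
    .then(function (result) {
      console.log('Command finished; captured ' + result.output.length + ' characters of output');
    })
    .catch(function (err) {
      console.error('Command failed: ' + err.message);
    });
});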
I am using gulp-gae and it seems to work well.
Supported commands are appcfg.py and dev_appserver.py (in the current version). It can also be configured to override some values from the given app.yaml.
