touch(): Utime failed: Operation not permitted filesystem.inc:262 - drupal-7

I'm trying to setup a Vagrant box with Puphpet to then install Drupal using Drush.
However, when I drush dl drupal in my document root, I get the following error:
touch(): Utime failed: Operation not permitted filesystem.inc:262 [warning]
Project drupal (7.37) could not be downloaded to [error]
/var/www/public/drupal-7.37.
Not sure if this is a problem with the Vagrant box or something more to do with Drush/Composer.
My puphpet config is as follows:
vagrantfile:
target: local
vm:
box: puphpet/ubuntu1404-x64
box_url: puphpet/ubuntu1404-x64
hostname: local.puphpet
memory: '512'
cpus: '1'
chosen_provider: vmware_fusion
network:
private_network: 192.168.57.101
forwarded_port:
vflnp_idmj0ss9i4eg:
host: '7451'
guest: '22'
post_up_message: ''
provider:
virtualbox:
modifyvm:
natdnshostresolver1: on
showgui: '0'
vmware:
numvcpus: 1
parallels:
cpus: 1
provision:
puppet:
manifests_path: puphpet/puppet
manifest_file: site.pp
module_path: puphpet/puppet/modules
options:
- '--verbose'
- '--hiera_config /vagrant/puphpet/puppet/hiera.yaml'
- '--parser future'
synced_folder:
vflsf_3zddkb64ssq3:
source: ./
target: /var/www
sync_type: nfs
smb:
smb_host: ''
smb_username: ''
smb_password: ''
rsync:
args:
- '--verbose'
- '--archive'
- '-z'
exclude:
- .vagrant/
- .git/
auto: 'true'
owner: www-data
group: www-data
usable_port_range:
start: 10200
stop: 10500
ssh:
host: null
port: null
private_key_path: null
username: vagrant
guest_port: null
keep_alive: true
forward_agent: false
forward_x11: false
shell: 'bash -l'
vagrant:
host: detect
server:
install: '1'
packages:
- vim
- git-core
- curl
- build-essential
users_groups:
install: '1'
groups: { }
users: { }
firewall:
install: '1'
rules: { }
cron:
install: '1'
jobs: { }
nginx:
install: '0'
settings:
default_vhost: 1
proxy_buffer_size: 128k
proxy_buffers: '4 256k'
upstreams: { }
vhosts:
nxv_3d36yr0pt4c0:
server_name: awesome.dev
server_aliases:
- www.awesome.dev
www_root: /var/www/awesome
listen_port: '80'
index_files:
- index.html
- index.htm
- index.php
client_max_body_size: 1m
ssl: '0'
ssl_cert: ''
ssl_key: ''
ssl_port: '443'
ssl_protocols: ''
ssl_ciphers: ''
rewrite_to_https: '1'
spdy: '1'
locations:
nxvl_gsmm6kwb895r:
location: /
autoindex: off
try_files:
- $uri
- $uri/
- /index.php$is_args$args
fastcgi: ''
fastcgi_index: ''
fastcgi_split_path: ''
nxvl_uff97iq3j7jg:
location: '~ \.php$'
autoindex: off
try_files:
- $uri
- $uri/
- /index.php$is_args$args
fastcgi: '127.0.0.1:9000'
fastcgi_index: index.php
fastcgi_split_path: '^(.+\.php)(/.*)$'
fast_cgi_params_extra:
- 'SCRIPT_FILENAME $request_filename'
- 'APP_ENV dev'
proxies: { }
apache:
install: '1'
settings:
user: www-data
group: www-data
default_vhost: true
manage_user: false
manage_group: false
sendfile: 0
modules:
- proxy_fcgi
- rewrite
vhosts:
av_r7yrspsczqb7:
servername: test.dev
serveraliases:
- www.test.dev
docroot: /var/www/public
port: '80'
setenv:
- 'APP_ENV dev'
custom_fragment: ''
ssl: '0'
ssl_cert: ''
ssl_key: ''
ssl_chain: ''
ssl_certs_dir: ''
ssl_protocol: ''
ssl_cipher: ''
directories:
avd_f0un3li3r50v:
path: /var/www/awesome
options:
- Indexes
- FollowSymlinks
- MultiViews
allow_override:
- All
require:
- 'all granted'
custom_fragment: ''
files_match:
avdfm_9zp5eo9ej3xd:
path: \.php$
sethandler: 'proxy:fcgi://127.0.0.1:9000'
custom_fragment: ''
provider: filesmatch
provider: directory
php:
install: '1'
settings:
version: '55'
modules:
php:
- cli
- intl
- mcrypt
- curl
- common
- gd
- imagick
- mysql
pear: { }
pecl:
- pecl_http
ini:
display_errors: On
error_reporting: '-1'
session.save_path: /var/lib/php/session
date.timezone: UTC
fpm_ini:
error_log: /var/log/php-fpm.log
fpm_pools:
phpfp_ujz3vxrtob19:
ini:
prefix: www
listen: '127.0.0.1:9000'
security.limit_extensions: .php
user: www-user
group: www-data
composer: '1'
composer_home: ''
xdebug:
install: '1'
settings:
xdebug.default_enable: '1'
xdebug.remote_autostart: '0'
xdebug.remote_connect_back: '1'
xdebug.remote_enable: '1'
xdebug.remote_handler: dbgp
xdebug.remote_port: '9000'
blackfire:
install: '0'
settings:
server_id: ''
server_token: ''
agent:
http_proxy: ''
https_proxy: ''
log_file: stderr
log_level: '1'
php:
agent_timeout: '0.25'
log_file: ''
log_level: '1'
xhprof:
install: '0'
wpcli:
install: '0'
version: v0.19.0
drush:
install: '1'
version: 6.3.0
ruby:
install: '1'
versions: { }
python:
install: '1'
packages: { }
versions: { }
nodejs:
install: '1'
npm_packages:
- gulp
hhvm:
install: '0'
nightly: 0
composer: '1'
composer_home: ''
settings: { }
server_ini:
hhvm.server.host: 127.0.0.1
hhvm.server.port: '9000'
hhvm.log.use_log_file: '1'
hhvm.log.file: /var/log/hhvm/error.log
php_ini:
display_errors: On
error_reporting: '-1'
date.timezone: UTC
mysql:
install: '1'
settings:
version: '5.6'
root_password: '123'
override_options: { }
adminer: 0
users:
mysqlnu_xvr60jb5bh6r:
name: dbuser
password: '123'
databases:
mysqlnd_o0drfx5feu70:
name: dbname
sql: ''
grants:
mysqlng_xws9dc1d0p0q:
user: dbuser
table: '*.*'
privileges:
- ALL
postgresql:
install: '0'
settings:
global:
encoding: UTF8
version: '9.3'
server:
postgres_password: '123'
databases: { }
users: { }
grants: { }
adminer: 0
mongodb:
install: '0'
settings:
auth: 1
bind_ip: 127.0.0.1
port: '27017'
databases: { }
redis:
install: '0'
settings:
conf_port: '6379'
sqlite:
install: '0'
adminer: 0
databases: { }
mailcatcher:
install: '0'
settings:
smtp_ip: 0.0.0.0
smtp_port: 1025
http_ip: 0.0.0.0
http_port: '1080'
mailcatcher_path: /usr/local/rvm/wrappers/default
from_email_method: inline
beanstalkd:
install: '0'
settings:
listenaddress: 0.0.0.0
listenport: '13000'
maxjobsize: '65535'
maxconnections: '1024'
binlogdir: /var/lib/beanstalkd/binlog
binlogfsync: null
binlogsize: '10485760'
beanstalk_console: 0
rabbitmq:
install: '0'
settings:
port: '5672'
users: { }
vhosts: { }
plugins: { }
elastic_search:
install: '0'
settings:
version: 1.4.1
java_install: true
solr:
install: '0'
settings:
version: 4.10.2
port: '8984'

I was struggling with the same issue. I have found that NOT using NFS as the sync_type solved my problem.

Related

Github Actions Env Secrets not working in Deploy Job

name: Azure Static Web Apps CI/CD
on:
push:
branches:
- master
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- master
jobs:
build:
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 16
- run: |
cd src
npm install
npm run citest
npm run build-dev2
rm -rf node_modules
- name: Publish website output
uses: actions/upload-artifact@v3
with:
name: DEMO APP
path: ${{ github.workspace }}/src/build
devDeploy:
runs-on: ubuntu-latest
needs: [build]
steps:
- name: Dev Deploy
uses: actions/download-artifact@v1
with:
name: DEMO APP
path: ${{ github.workspace }}
- name: Dev Deploy
id: builddeploy
uses: Azure/static-web-apps-deploy@v1
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_NEW_SUN_1185DE103 }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for GitHub integrations (i.e. PR comments)
action: "upload"
###### Repository/Build Configurations ######
app_location: "" # App source code path relative to repository root
api_location: "" # Api source code path relative to repository root - optional
output_location: "build" # Built app content directory - optional
skip_app_build: true
env:
REACT_APP_AZURE_AD_KEY: ${{ secrets.REACT_APP_AZURE_AD_KEY }}
REACT_APP_AZURE_AD_REDIRECT_URI: ${{ secrets.REACT_APP_AZURE_AD_REDIRECT_URI }}
REACT_APP_GIS_APP_ID: ${{ secrets.REACT_APP_GIS_APP_ID }}
REACT_APP_TINYMCE_KEY: ${{ secrets.REACT_APP_TINYMCE_KEY }}
###### End of Repository/Build Configurations ######
close_pull_request_job:
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
name: Close Pull Request Job
steps:
- name: Close Pull Request
id: closepullrequest
uses: Azure/static-web-apps-deploy@v1
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_NEW_SUN_1185DE103 }}
action: "close"

NextJS deployment on sub path with ingress config

I am trying to deploy nextjs app in the sub path using k8s ingress.
Here is my ingress config:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: simple-ingress
annotations:
kubernetes.io/ingress.class: 'nginx'
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
tls:
- hosts:
- 'example.com'
rules:
- host: 'example.com'
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: react-app
port:
number: 80
- path: /site
pathType: Prefix
backend:
service:
name: next-app
port:
number: 3002
And here is my nextjs config:
const nextConfig = {
staticPageGenerationTimeout: 60,
assetPrefix: isProd ? '/site' : '',
basePath: isProd ? '/site' : '',
images: {
path: isProd ? '/site/_next/image' : '/_next/image',
},
}
module.exports = withNx(nextConfig)
With the above configuration, the first ingress rule works and I am able to reach my endpoints at example.com; at the same time, example.com/site returns HTTP 404. What am I doing wrong?

How to access Kubernetes container environment variables from Next.js application?

In my next.config.js, I have a part that looks like this:
module.exports = {
serverRuntimeConfig: { // Will only be available on the server side
mySecret: 'secret'
},
publicRuntimeConfig: { // Will be available on both server and client
PORT: process.env.PORT,
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID,
BACKEND_URL: process.env.BACKEND_URL
}
I have a .env file, and when run locally, the Next.js application successfully fetches the environment variables from the .env file.
I refer to the env variables like this for example:
axios.get(publicRuntimeConfig.BACKOFFICE_BACKEND_URL)
However, when I have this application deployed onto my Kubernetes cluster, the environment variables set in the deploy file are not being collected. So they return as undefined.
I read that .env files cannot be read due to the differences between frontend (browser based) and backend (Node based), but there must be some way to make this work.
Does anyone know how to use environment variables saved in your pods/containers deploy file on your frontend (browser based) application?
Thanks.
EDIT 1:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "38"
creationTimestamp: xx
generation: 40
labels:
app: appname
name: appname
namespace: development
resourceVersion: xx
selfLink: /apis/extensions/v1beta1/namespaces/development/deployments/appname
uid: xxx
spec:
progressDeadlineSeconds: xx
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: appname
tier: sometier
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: appname
tier: sometier
spec:
containers:
- env:
- name: NODE_ENV
value: development
- name: PORT
value: "3000"
- name: SOME_VAR
value: xxx
- name: SOME_VAR
value: xxxx
image: someimage
imagePullPolicy: Always
name: appname
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 3000
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
conditions:
- lastTransitionTime: xxx
lastUpdateTime: xxxx
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 40
readyReplicas: 1
replicas: 1
updatedReplicas: 1
You can create a config-map and then mount it as a file in your deployment with your custom environment variables.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "38"
creationTimestamp: xx
generation: 40
labels:
app: appname
name: appname
namespace: development
resourceVersion: xx
selfLink: /apis/extensions/v1beta1/namespaces/development/deployments/appname
uid: xxx
spec:
progressDeadlineSeconds: xx
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: appname
tier: sometier
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: appname
tier: sometier
spec:
containers:
- env:
- name: NODE_ENV
value: development
- name: PORT
value: "3000"
- name: SOME_VAR
value: xxx
- name: SOME_VAR
value: xxxx
volumeMounts:
- name: environment-variables
mountPath: "your/path/to/store/the/file"
readOnly: true
image: someimage
imagePullPolicy: Always
name: appname
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 3000
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumes:
- name: environment-variables
configMap:
name: environment-variables
items:
- key: .env
path: .env
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
conditions:
- lastTransitionTime: xxx
lastUpdateTime: xxxx
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 40
readyReplicas: 1
replicas: 1
updatedReplicas: 1
I added the following configuration in your deployment file:
volumeMounts:
- name: environment-variables
mountPath: "your/path/to/store/the/file"
readOnly: true
volumes:
- name: environment-variables
configMap:
name: environment-variables
items:
- key: .env
path: .env
You can then create a config map with key ".env" with your environment variables on kubernetes.
Configmap like this:
apiVersion: v1
kind: ConfigMap
metadata:
name: environment-variables
namespace: your-namespace
data:
.env: |
variable1: value1
variable2: value2

Spring Boot - Liquibase doesn't create the DATABASECHANGELOG table

I have a Spring Boot application.
And my Liquibase doesn't create the DATABASECHANGELOG table.
It processes the changelog and after that throws an error.
Any ideas what could be wrong?
Something is missing?
error:
Caused by: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: Table 'addressbook.DATABASECHANGELOG' doesn't exist
config:
spring:
jpa:
show-sql: false
hibernate:
ddl-auto: none
datasource:
url: jdbc:mysql://localhost:3306/addressbook?characterEncoding=UTF-8
username: root
password: root
driver-class-name: com.mysql.jdbc.Driver
liquibase:
enabled: true
change-log: "classpath:db/liquibase-changelog.yml"
changelog:
databaseChangeLog:
- changeSet:
id: 1
author: me
changes:
- sqlFile:
path: sql/20181025_startup.sql
relativeToChangelogFile: true
stripComments: true
- changeSet:
id: 2
author: me
changes:
- sqlFile:
path: sql/20181026_create_table_contacts.sql
relativeToChangelogFile: true
stripComments: true
dependencies:
gradleVersion=4.10.2
springCloudVersion=Finchley.SR2
springBootVersion=2.0.6.RELEASE
mysqlVersion=5.1.47
liquibaseVersion=3.6.2

Why google app engine is throwing server error

Error: Server Error
The server encountered an error and could not complete your request.
Please try again in 30 seconds.
My Google App Engine app is throwing this error after deploy. When I tested it locally, it was working fine. The number of versions went higher than 15, but I deleted the older versions and now only 2 versions remain. Still, I am getting the same error. What can I do to make my App Engine app work? Please help.
Log
{
httpRequest: {"status": 500},
insertId: "5a0ffcb500011a4129d76be9",
labels: {
clone_id: "00c61b117c7f4bcf62b687c18a5e5cfdc5dca0ce0d2548d2e5ea797a1dd1d0e4f55478a746f842"
},
logName: "projects/xxxxxx/logs/appengine.googleapis.com%2Frequest_log",
operation: {
first: true,
id: "5a0ffcb200ff014bb267654c030001737e716f74692d37343133300001323031373131313874313433323033000100",
last: true,
producer: "appengine.googleapis.com/request_id",
},
protoPayload: {
#type: "type.googleapis.com/google.appengine.logging.v1.RequestLog",
appEngineRelease: "1.9.54",
appId: "s~xxx-xxxxx",
endTime: "2017-11-18T09:26:12.345119Z",
finished: true,
first: true,
host: "xxxx-xxxx.appspot.com",
httpVersion:"HTTP/1.1",
instanceId: "00c61b117c7f4bcf62b687c18a5e5cfdc5dca0ce0d2548d2e5ea797a1dd1d0e4f55478a746f842",
instanceIndex: -1,
ip: "122.xxx.xx.xxx",
latency: "2.260205s",
method: "POST",
pendingTime: "2.237488746s",
requestId: "5a0ffcb200ff014bb267654c030001737e716f74692d37343133300001323031373131313874313433323033000100",
resource: "/reports/standardReport",
startTime: "2017-11-18T09:26:10.084914Z",
status: 500,
urlMapEntry: "main.app",
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
versionId: "20171118t143203",
},
receiveTimestamp: "2017-11-18T09:26:13.075858035Z",
resource: {
labels: {
module_id: "default",
project_id: "xxxx-xxxx",
version_id: "20171118t143203",
zone: "us12",
},
type: "gae_app",
},
timestamp: "2017-11-18T09:26:10.084914Z",
}
app.yaml
runtime: python27
api_version: 1.0
threadsafe: true
handlers:
- url: /.*
script: main.app
libraries:
- name: webapp2
version: latest
- name: pycrypto
version: latest
- name: ssl
version: latest
default_expiration: "30d"
automatic_scaling:
max_idle_instances: 1
min_idle_instances: 1 # can be set to 0 for check
max_concurrent_requests: 40 # can go upto 80
min_pending_latency: 30ms #default value

Resources