xpra icon
Bug tracker and wiki

This bug tracker and wiki are being discontinued.
Please use https://github.com/Xpra-org/xpra instead.


Ticket #2989: xpra-proxy-443.sh

File xpra-proxy-443.sh, 30.2 KB (added by louis-mulder, 4 months ago)

Deploys an Xpra ingress server; shared storage is based on NFS.

Line 
#!/bin/bash
#
# Parse the only supported option (--buildimages=yes|no, default no) and
# record the answer in TOBUILD ('yes' or 'no').
#
# No changes on the next lines are needed; site-specific settings live
# between
# Begin ###########################################
# and
# End ###########################################
#
# Lower-case the argument portably: the original used the GNU-sed-only
# replacement escape '\l&', which other sed implementations do not support.
OPTION=`echo ${1:-'--buildimages=no'} | tr '[:upper:]' '[:lower:]'`
case ${OPTION} in
#(
 --buildimages=no | buildimages=no |\
 --buildimages=yes | buildimages=yes )
   # Keep only the part after '=' (yes/no) by re-splitting on '='.
   TOBUILD=`IFS=\= ; set -- ${OPTION} ; echo ${2}` ;export TOBUILD
;;
#(
* )
   echo "Usage: `basename ${0}` with building images --buildimages=yes"
   echo '        without omit option or use --buildimages=no'
   exit 1
;;
esac
26#
BASE_WORKING_DIR="${BASE_WORKING_DIR:-`dirname \`pwd\``}"/to_container ; export BASE_WORKING_DIR
#
#
# Louis Mulder September 2020
#
# Deploy a Kubernetes based VDI environment on the cluster.
# Below there are some variables to fill in or to change
# depending on your situation.
#
# Script is provided as it is.
#
# Check if you are running as root on the master.
#
# Xpra is released under the terms of the GNU GPL v2, or, at your option, any
# later version. See the file COPYING for details.
#
# Master ip is parsed from 'kubectl cluster-info' (stderr discarded so a
# missing/unconfigured kubectl just yields an empty string).
MASTER_IP_ADDR="`exec 2> /dev/null; kubectl cluster-info|grep -i master| sed -e 's%^.*//%%' -e 's%:.*$%%'`"
#
# Bug fix: the original used `id | sed -e 's/^.*=//' -e 's/(.*$//'`; the
# greedy '.*=' matches up to the LAST '=' of the id output (the groups
# field), not the uid.  'id -u' is the portable, unambiguous form.
if ! ip a | grep "${MASTER_IP_ADDR}" 1> /dev/null 2>&1 || \
    [ "`id -u`" != 0 ]
then
   MASTER_NAME=`set -- \`getent hosts ${MASTER_IP_ADDR}\` ; echo ${2}`
   echo "You must run `basename ${0}` as root on server ${MASTER_NAME:-'???'} with ip ${MASTER_IP_ADDR}"
   exit 1
fi
# Record where the script was started from; ABS_PATH is used below to refuse
# running from inside the pristine distribution tree.
52DISTNAME='vdi-dist' ; export DISTNAME
53OLDIFS="${IFS}" ; export OLDIFS
54PROG=${0}
55BPROG=`basename ${PROG}`
56DPROG=`dirname ${PROG}`
57ABS_PATH=`(cd ${DPROG}; pwd)`
58export PROG
# The distribution must be copied first: the copy's directory name becomes
# the NAMESPACE, so running from .../${DISTNAME}/... is rejected.
59case ${ABS_PATH} in
60#(
61     /*/${DISTNAME}/* ) 
62        ( echo "You must first copy the content of `dirname ${ABS_PATH}`"
63          echo "to new directory ending in directory-name which will be used as a"
64          echo "new NAMESPACE name. Then go to this directory/deploy and"
65          echo "adjust xpra-proxy.sh, and run it with ./xpra-proxy.sh from this position"
66        ) 1>&2
67        exit 1
68;;
69esac
70#
71#
72#
73# Xpra-proxy server will be exposed as service
74# And will be accessible by external ip-addresses
75#
76# Be sure that this address is configured as a VIP on a worker/master
77# For HA use for example keepalived.
78#
79#
80EXTERNALIPS='10.7.6.237'   # Fill in the addresses separated by a space
81#
82# Ip address(es) where the service xpra-proxy can be accessed
83# from outside the cluster.
84# Portnumber on the outside is ${PROXY_PORT}
85# Proxy instance will be exposed internally with 8${PROXY_PORT}
86#
87PROXY_PORT=443 ;export PROXY_PORT
88#
89# Variables specified above the line OLDENV="`set | sed -e '....
90# will NOT be included in the ../etc/xpra-vars.sh profile !!
91# If you want to add variables which must be used in the scripts etc.
92# in the session-containers or proxy-container(s) specify them after
93# the line # End ###########################################
94#
95# Don't remove next 3 lines !!!!
# OLDENV becomes a sed delete-script for every variable that already exists
# at this point; generate_xpra_vars_sh later uses it to filter them out.
96OLDENV="`set | sed -e 's/=.*$/=/' -e '/^'"'"'/d'\
97                   -e 's%^%/^%' -e 's%$%/d%'`" # Don't remove these lines !!!!
98export OLDENV
99#
100# End ###########################################
101#
102# BEGINVARS Don't remove this line !!!!
103# Inventory where the user sessions may run.
104#
105# If XPRA_WORKERS is empty or has the value all (lower or uppercase)
106# No labeling will take place and session(s) may run everywhere.
107# if it contains a list of servernames these servers will be labeled
108# with xpra-worker=${NAMESPACE}
109#
110XPRA_WORKERS=''
111#
112export XPRA_WORKERS
113#
114XPRA_LOCAL_TIME=/usr/share/zoneinfo/Europe/Amsterdam
115#
116# localtime zone setting
117# Must be an absolute path to the timezone
118#
119# If empty default zone = /usr/share/zoneinfo/Europe/Amsterdam
120#
121export XPRA_LOCAL_TIME
122#
123# Default namespace will be derived from the current directory, remove 'deploy' and
124# take the basename of the result.
125#
126# Proxy Ingress server(s) will run in the namespace ingress-${NAMESPACE}
127#
128NAMESPACE="${NAMESPACE:-`basename \`dirname \\\`pwd\\\`\``}" ; export NAMESPACE
129#
130#
131XPRA_DEPLOYNAME='xpra-proxy'; export XPRA_DEPLOYNAME
132#
133# TOPDIRS and NFS server configuration
134#
135# Shared via a mount on the underlying server (worker)
136#
137# BASE_WORKING_DIR == Full path of shared storage on the servers
138#
139# XPRA_TOPDIR_INT == mountpath in the pod/container of the distribution
140#
141SRV_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRV_NFS_SERVER
142SRV_NFS_SERVER_INT="${XPRA_SRV_NFS_SERVER_INT:-/srv}" ; export SRV_NFS_SERVER_INT
143SRV_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container" ; export SRV_NFS_SERVER_EXT
144#
145XPRA_TOPDIR_INT=${SRV_NFS_SERVER_INT:-/srv} ; export XPRA_TOPDIR_INT
146XPRA_TOPDIR_EXT=${SRV_NFS_SERVER_EXT:-/srv} ; export XPRA_TOPDIR_EXT
147#
148# If variable XPRA_DATALOCK_NFS_SERVER is specified and has the value of servername which is exporting
149# a directory it will be mounted under /incoming as readonly on a session pod which also has
150# the shared homedir mounted. (sessiontype with a prefix mhd)
151#
152# XPRA_DATALOCK_NFS_SERVER= ; export XPRA_DATALOCK_NFS_SERVER # No datalock server
153#
DATALOCK_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export DATALOCK_NFS_SERVER
DATALOCK_NFS_SERVER_EXT='/export/home/outgoing' ; export DATALOCK_NFS_SERVER_EXT
# Bug fix: the original exported DATALOCK_NFS_SERVER_EXT a second time on
# this line, leaving DATALOCK_NFS_SERVER_INT unexported.
DATALOCK_NFS_SERVER_INT='/incoming' ; export DATALOCK_NFS_SERVER_INT
DATALOCK_NFS_SERVER_OPTIONS='readOnly: true'; export DATALOCK_NFS_SERVER_OPTIONS
158#
159# Proxy may create user directories
160# In session set it to readonly true
161#
162# If session is specified as mhd-XXXXX. XXXX stands for example desktop, seamless etc.
163# A shared directory/storage will be mounted in the pod and will be used as
164# placeholder for persitent homedirs.
165#
166# In case high-secured is desired remove all the sessions startups begginning with mhd-
167# out of the directory ../session_types (seen from the current ../deploy directory.
168#
169#
HOME_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export HOME_NFS_SERVER
HOME_NFS_SERVER_EXT="${HOME_NFS_SERVER_EXT:-/export/home}" ;export HOME_NFS_SERVER_EXT
HOME_NFS_SERVER_INT="${HOME_NFS_SERVER_INT:-/home}" ;export HOME_NFS_SERVER_INT
# Bug fix: a ';' was missing between the assignment and 'export', so the
# value only lived in the temporary environment of the export builtin.
HOME_NFS_SERVER_EXT_OPT_NFS='readOnly: false' ; export HOME_NFS_SERVER_EXT_OPT_NFS
#
# If a user activates a session with prefix 'mhd' and it is permitted to
# mount his/her global homedir, set XPRA_USER_MHD to Y.
#
XPRA_USER_MHD=Y ;export XPRA_USER_MHD
#
#
# Shared scratch /tmp for the sessions; see also the comment around the
# variable XPRA_SCRATCH_EXT further below.
#
# Bug fix: the original chained three '!=' tests with -o, which is always
# true.  Configure the share unless explicitly disabled with
# XPRA_SCRATCH_EXT=N (or n).
# NOTE(review): an empty XPRA_SCRATCH_EXT still enables the share because a
# default path is assigned further below -- confirm that is the intent.
if [ "${XPRA_SCRATCH_EXT}" != 'N' ] && [ "${XPRA_SCRATCH_EXT}" != 'n' ]
then
SHRDTMP_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SHRDTMP_NFS_SERVER
SHRDTMP_NFS_SERVER_INT="/shrd-tmp" ; export SHRDTMP_NFS_SERVER_INT
SHRDTMP_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/scratch/tmp" ; export SHRDTMP_NFS_SERVER_EXT
SHRDTMP_NFS_SERVER_OPTIONS='readOnly: false'; export SHRDTMP_NFS_SERVER_OPTIONS
#
fi
191#
# Read-only NFS exports mounted into the pods: /srv/etc, /srv/bin and the
# /staging area (the latter for the proxy only, see the warning below).
192SRVETC_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRVETC_NFS_SERVER
193SRVETC_NFS_SERVER_INT="/srv/etc" ; export SRVETC_NFS_SERVER_INT
194SRVETC_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/etc" ; export SRVETC_NFS_SERVER_EXT
195SRVETC_NFS_SERVER_OPTIONS='readOnly: true'; export SRVETC_NFS_SERVER_OPTIONS
196#
197#
198SRVBIN_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRVBIN_NFS_SERVER
199SRVBIN_NFS_SERVER_INT="/srv/bin" ; export SRVBIN_NFS_SERVER_INT
200SRVBIN_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/bin" ; export SRVBIN_NFS_SERVER_EXT
201SRVBIN_NFS_SERVER_OPTIONS='readOnly: true'; export SRVBIN_NFS_SERVER_OPTIONS
202#
203#
204STAGING_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export STAGING_NFS_SERVER
205STAGING_NFS_SERVER_INT="/staging" ; export STAGING_NFS_SERVER_INT
206STAGING_NFS_SERVER_EXT="/export/home/staging" ; export STAGING_NFS_SERVER_EXT
207STAGING_NFS_SERVER_OPTIONS='readOnly: true'; export STAGING_NFS_SERVER_OPTIONS
208#
209# Don't expose staging in session-pods !!!
210#
211# Image registry server
212# Format FQDN:PORTNUMBER
213#
214XPRA_REGISTRY_SRV='registry.vdi.xpra.demo:5000'
215#
216export XPRA_REGISTRY_SRV
217# A default image must be specified and available
218#
219IMAGE_DEFAULT="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_DEFAULT
220#
221# Specific images per session type if desired.
222#
223IMAGE_DESKTOP_OFFICE="${XPRA_REGISTRY_SRV}/vdi-office" ;export IMAGE_DESKTOP_OFFICE
224IMAGE_SEAMLESS_OFFICE="${XPRA_REGISTRY_SRV}/vdi-office" ;export IMAGE_SEAMLESS_OFFICE
225IMAGE_DESKTOP="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_DESKTOP
226IMAGE_SEAMLESS="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_SEAMLESS
227IMAGE_XPRA_PROXY=${IMAGE_XPRA_PROXY:-"${XPRA_REGISTRY_SRV}/vdi-base"};export IMAGE_XPRA_PROXY
228#IMAGE_XPRA_PROXY="${XPRA_REGISTRY_SRV}/vdi-base-ubuntu" ; export IMAGE_XPRA_PROXY
229PRESTOP_CMD="${PRESTOP_CMD:-xpra stop}" ; export PRESTOP_CMD
# NOTE(review): the generated deployment yaml consumes PRE_STOP_CMD (set
# further below), not PRESTOP_CMD -- this variable appears unused; confirm
# which name is intended.
230#
231#registry.do.not.where:5000/vdi-base
232#
233# Certificates
234# Accessing the xpra-proxy with websocket etc. must be done in a secure way !!
235#
236# Place the cert. files in ../ssl
237#
238SSL=on
239SSL_CERT=/etc/xpra/ssl/server.crt
240SSL_KEY=/etc/xpra/ssl/server.key
241#
242# If XPRA_DEMO_USERS=Y and on the .../etc directory the files
243# demousers-passwd and demousers-shadow are available
244# these files will be appended in the pod in /etc/passwd and
245# /etc/shadow
246#
247# Format is the same as for shadow and passwd
248#
249# With a useradd and chpasswd an existing passwd/shadow can be appended
250# in the first stage. With a copy/paste insert the created demo-users
251# in demousers-passwd and ---shadow.
252#
253# default added a demouser-passwd/--shadow with xpra-user01--05 and
254# a simple password 'only4now'
255#
256XPRA_DEMO_USERS=Y
257export XPRA_DEMO_USERS
258#
259#
260# Domainname (DNS)
261#
262DOMNAME="${DOMNAME:-vdi.xpra.demo}"
263#
264# Sessions using IDM/IPA
265# If IDM_DOMAIN = empty only the
266# local passwd file will be used to determine
267# the users UID/GID and validating
268# Idm/Freeipa is also installed in the session-pods
269#
270# See also the variable SESSION_USING_IDM
271# If you using for large amounts of users be sure you have
272# more freeipa/IDM servers.
273# If users has a 2 factor auth. The pam_auth xpra-module cannot
274# handle this.
275# But: If a OTP is found in the users credentials the user needs
276# only this as password for validation. See also the pam service
277# xpra. The standard generated OTP will not work because its length
278# is too long (key), it is not compatible with the oath pam-module.
279# How to work around is as follows:
280#
281# Install the packages gen-oath-safe and oathtool
282# Generate a token and use the key as key input for freeipa/IDM server
283# during the creation of a token for a user.
284#
285# Example:
286# Generate key with
287#  on the shell-prompt:
288#    gen-oath-safe  totp
289#
290# INFO: Bad or no token type specified, using TOTP.
291# INFO: No secret provided, generating random secret.
292#
293# Key in Hex: 7b43210b0d981195b5e68875eb1f94b28d6a6103
294# Key in b32: PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID (checksum: 8)
295#
296# URI: otpauth://totp/totp?secret=PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID
297#
298# <DISPLAY OF THE QR-CODE>
299#                                                               
300# users.oath / otp.users configuration:
301# HOTP/T30 totp - 7b43210b0d981195b5e68875eb1f94b28d6a6103
302#
303# take the string: PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID paste it in
304# keyfield of the popup of the OTP-window or use it as
305# option value when creating a user with the CLI of ipa/idm.
306#
307# The script used by PAM /bin/pre-auth.sh searches the
308# LDAP environment for the OTP of a user and place it in /etc/oath/users.oath
309# The pam-module pam_oath.so will look in this file.
310#
311# It works for the tcp_auth option and when you are using ssh as transport-channel.
312# However by using ssh you need give two times a OTP. In most cases you need
313# generate the OTP twice.
314#
315# For generating a OTP use FreeOtp or on a Linux command prompt you can generate
316# the OTP with oathtool
317# For example:
318# oathtool -w0 --totp -b PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID
319#
320# And will give you the otp as output -- 683205
321
322# Using: websocket or ssl (creates a ssl-tunnel between the proxy-server and your workplace)
323#
324#  Obtaining the ca-certificate can be done by first logging in with a browser
325#  and exporting the certificate.
326#
327#  xpra attach wss://....@srvname:portnr/session-type --ssl-ca-certs=<CA Cert file>
328#
329#  xpra attach ssh://....@srvname:portnr/session-type
330#  If a OTP is created in the ipa/idm environment you need to generate a OTP 2 times.
331#
332#  Xpra will allow you to use a plain in the clear data transmission !
333#
334# In most cases the ipa/idm domainame is equal to the DNS-domainname
335# If not change the line below or when NOT using ipa/idm set the variable
336# to empty
337#
338IDM_DOMAIN="${IDM_DOMAIN:-${DOMNAME}}" ; export IDM_DOMAIN
339#IDM_DOMAIN="" ; export IDM_DOMAIN
340#
341# IDM_ADMIN_PASSWORD, password of dirsrv specified as base64
342# string
343# Example: On the prompt
344#          echo -n changeMe | base64
345#          And take the result.
346#
347#  Don't forget the '-n' argument of echo
348#
349IDM_ADMIN_USER=${IDM_ADMIN_USER:-'YWRtaW4='} ; export IDM_ADMIN_USER
350IDM_ADMIN_PASSWORD='b25seTRub3c=' ; export IDM_ADMIN_PASSWORD
# NOTE(review): base64 is an encoding, not encryption -- these are demo
# credentials and should be replaced for any real deployment.
351#
352# Todo future
353#
354XPRA_VALIDATE_USER='Y' ; export XPRA_VALIDATE_USER
355#
356XPRA_AUTH_METHOD="${XPRA_AUTH_METHOD:-tcp,pamexec:service=login:command=/srv/bin/start_pod.sh:timeout=900}" 
357export XPRA_AUTH_METHOD
358#
359# if SESSION_USING_IDM=Y and IDM_DOMAIN is NOT empty idm/Freeipa will be
360# configured in the session-pod of the user
361# if SESSION_USING_IDM=N and IDM_DOMAIN is NOT empty idm/Freeipa will be
362# not installed in the session-pod. Idm/Freeipa will be activated only in
363# the XPRA proxy server. User uid/gid information will be provided by passing
364# an Env. variable to the session-pod.
365#
366SESSION_USING_IDM='N' ; export SESSION_USING_IDM
367#
368# Shared scratch directory
369#
370# ${XPRA_SCRATCH_EXT}/tmp will be mounted in the pod
371# as /shrd-tmp and is read/writeable like /tmp
372# Users can exchange files etc. between them.
373#
374# However if it is forbidden to copy in or out data from
375# underlying server(s) set it to N
376#
377#
378XPRA_SCRATCH_EXT=${BASE_WORKING_DIR}/scratch/tmp ;export XPRA_SCRATCH_EXT
379#
380# If the next variables contain an external directory
381# Sockets and Status of Xpra will be available outside the
382# container.
383#
384XPRA_STATUS_DIR=${XPRA_TOPDIR_INT}/xpra-status ; export XPRA_STATUS_DIR
385XPRA_SOCKET_DIR=${XPRA_TOPDIR_INT}/xpra-socket ; export XPRA_SOCKET_DIR
386#
387# Initial number of proxy-instances
388# For explanation see the Kubernetes documentation
389#
390REPLICAS=1
391#
392# Default xpra has a maximum of 100 concurrent connections
393# Overrule it with XPRA_MAX_CONCURRENT_CONNECTIONS
394#
395XPRA_MAX_CONCURRENT_CONNECTIONS=${XPRA_MAX_CONCURRENT_CONNECTIONS:-1024}
396#
397export XPRA_MAX_CONCURRENT_CONNECTIONS
398#
# Special settings XPRA and waiting times (sleep)
#
# Don't change unless you know what you are doing....
#
SECRET_NAME_PROXY="${XPRA_DEPLOYNAME}-certs" ; export SECRET_NAME_PROXY
SECRET_NAME_CERTS="${SECRET_NAME_PROXY}" ; export SECRET_NAME_CERTS
# Bug fix: the original line exported XPRA_DEPLOYNAME here instead of
# SECRET_NAME_KUBE, leaving SECRET_NAME_KUBE unexported.
SECRET_NAME_KUBE="${XPRA_DEPLOYNAME}-kube"  ; export SECRET_NAME_KUBE
XPRA_SOCKET_TIMEOUT=180 ; export XPRA_SOCKET_TIMEOUT
XPRA_PING_TIMEOUT=120 ; export XPRA_PING_TIMEOUT
XPRA_PROXY_SOCKET_TIMEOUT=0.8 ;export XPRA_PROXY_SOCKET_TIMEOUT
XPRA_PROXY_WS_TIMEOUT=0.8;export XPRA_PROXY_WS_TIMEOUT
XPRA_CONNECT_TIMEOUT=60 ; export XPRA_CONNECT_TIMEOUT
XPRA_EXEC_AUTH_TIMEOUT=900; export XPRA_EXEC_AUTH_TIMEOUT
#
XPRA_STARTUP_PROXY="${XPRA_TOPDIR_INT}/bin/startup_proxy.sh ${XPRA_TOPDIR_INT}/bin/start_or_get_pod.sh"
#XPRA_STARTUP_PROXY="/bin/sleep 17200" # Startup proxy for debugging etc. Pod will startup only with a sleep of 17200 sec.
# NOTE(review): the deployment yaml consumes PRE_STOP_CMD, while the config
# section above defines PRESTOP_CMD -- confirm which name is intended.
PRE_STOP_CMD="/usr/bin/xpra stop"
LIVENESS_PROBE_CMD="${XPRA_TOPDIR_INT}/bin/health_check_xpra-proxy.sh"
PORT="${PORT:-14500}" ;export PORT
#
XPRA_SERVER_CRT="${BASE_WORKING_DIR}/../ssl/server.crt"
XPRA_SERVER_KEY="${BASE_WORKING_DIR}/../ssl/server.key"
# may delete#
# may deleteVOLUME_SRVDIR="${BASE_WORKING_DIR}"      ; export VOLUME_SRVDIR
# may deleteVOLUME_SSLDIR="${BASE_WORKING_DIR}"'/ssl'; export VOLUME_SSLDIR
# may delete#
#
# Bug fix: a ';' was missing between the assignment and 'export'.
EMPTYDIR="/tmp/em${$}ty" ; export EMPTYDIR
#
# Secret name for joining the IDM/FreeIPA domain (empty when no IDM is used).
SECRET_NAME_IDM=""
[ "${IDM_DOMAIN}" != '' ] && \
   SECRET_NAME_IDM="join-idm-`echo ${IDM_DOMAIN}| sed -e 's/\./-/g'`"
export SECRET_NAME_IDM
432#
433export SSL SSL_CERT SSL_KEY DOMNAME
434export XPRA_SERVER_CRT XPRA_SERVER_KEY
435#
436# Be sure directories are available
437#
438[ "${XPRA_SCRATCH_EXT}" != '' -a ! -d "${XPRA_SCRATCH_EXT}" ] && mkdir -p ${XPRA_SCRATCH_EXT} 2> /dev/null
439#
440if [ -d "${XPRA_SCRATCH_EXT}/." ]
441then
442   chown root:root ${XPRA_SCRATCH_EXT}
443   chmod 700 `dirname ${XPRA_SCRATCH_EXT}`
444   chown root:root ${XPRA_SCRATCH_EXT}
445   chmod 1777 ${XPRA_SCRATCH_EXT}
446fi
447
448[ ! -d "${BASE_WORKING_DIR}/save-states/." ] && mkdir -p  "${BASE_WORKING_DIR}/save-states"
449
450#
451# Source general SHELL functions
452#
453if [ -f "${BASE_WORKING_DIR}/etc/xpra-functions.sh" ]
454then
455  . "${BASE_WORKING_DIR}/etc/xpra-functions.sh"
456else
457 echo "Can't find file ${BASE_WORKING_DIR}/etc/xpra-functions.sh"  1>&2
458 exit 3
459fi
460generate_xpra_proxy () { #  Don't remove this line and must be begin at column 0 !!!
461#
# Emit on stdout the full Kubernetes manifest for the proxy: two Namespaces,
# optional IDM secrets, two NodePort Services exposed on ${EXTERNALIPS}
# (PROXY_PORT and PROXY_PORT+1), the xpra-env ConfigMap and the xpra-proxy
# Deployment.  Runs in a subshell so the XPRA_MODE export stays local.
# Everything between 'cat <<EOB' and 'EOB' is emitted literally after
# variable/backtick expansion -- do not re-indent or re-wrap it.
# NOTE(review): the XPRA_WORKERS test near the end joins '!=' terms with -o
# (always true unless both fail) and then emits a bare 'nodeSelector:' key
# with no label below it -- confirm the intended inventory behaviour.
462(
463XPRA_MODE='proxy' ; export XPRA_MODE
464cat <<EOB
465kind: Namespace
466apiVersion: v1
467metadata:
468  name: ingress-${NAMESPACE}
469  labels:
470    name: ingress-${NAMESPACE}
471---
472kind: Namespace
473apiVersion: v1
474metadata:
475  name: ${NAMESPACE}
476  labels:
477    name: ${NAMESPACE}
478`if [ "${IDM_DOMAIN}" != '' ]
479then
480echo '---'
481echo 'apiVersion: v1'
482echo 'kind: Secret'
483echo 'metadata:'
484echo '  name: "'"${SECRET_NAME_IDM}"'"'
485echo '  namespace: '"ingress-${NAMESPACE}"
486echo 'data:'
487echo '  dirsrv-password: "'"${IDM_ADMIN_PASSWORD}"'"'
488echo '  idm-admin-password: "'"${IDM_ADMIN_PASSWORD}"'"'
489echo '  idm-admin-user: "'"${IDM_ADMIN_USER}"'"'
490echo 'type: Opaque'
491echo '---'
492echo 'apiVersion: v1'
493echo 'data:'
494echo '  idm-admin-user: "'"${IDM_ADMIN_USER}"'"'
495echo '  idm-admin-password: "'"${IDM_ADMIN_PASSWORD}"'"'
496echo 'kind: Secret'
497echo 'metadata:'
498echo '  name: '"${SECRET_NAME_IDM}"
499echo '  namespace: '"${NAMESPACE}"
500echo 'type: Opaque'
501fi`
502---
503apiVersion: v1
504kind: Service
505metadata:
506  labels:
507    app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT}
508  name: ${XPRA_DEPLOYNAME}-8${PROXY_PORT}
509  namespace: ingress-${NAMESPACE}
510spec:
511  externalIPs:
512`for ip in ${EXTERNALIPS}
513do
514echo "  - ${ip}"
515done`
516  externalTrafficPolicy: Cluster
517  ports:
518  - port: ${PROXY_PORT:-443}
519    protocol: TCP
520    targetPort: 8${PROXY_PORT:-443}
521  selector:
522    app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
523  type: NodePort
524---
525apiVersion: v1
526kind: Service
527metadata:
528  labels:
529    app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT}
530  name: ${XPRA_DEPLOYNAME}-8`expr \`echo ${PROXY_PORT:-443}\` + 1`
531  namespace: ingress-${NAMESPACE}
532spec:
533  externalIPs:
534`for ip in ${EXTERNALIPS}
535do
536echo "  - ${ip}"
537done`
538  externalTrafficPolicy: Cluster
539  ports:
540  - port: `expr \`echo ${PROXY_PORT:-443}\` + 1`
541    protocol: TCP
542    targetPort: 8444
543  selector:
544    app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
545  type: NodePort
546---
547apiVersion: v1
548kind: ConfigMap
549metadata:
550    name: xpra-env
551    namespace: ingress-${NAMESPACE}
552data:
553   SESSION_USING_IDM: "${SESSION_USING_IDM}"
554   POD_FROM_NAMESPACE: "ingress-${NAMESPACE}"
555   SRC_PORT: "8${PROXY_PORT}"
556   PROTO_TYPE: "${PROTO_TYPE:-tcp}"
557   PASSWD_ENTRY: "${PASSWD_ENTRY}"
558   SSL: "${SSL}"
559   SSL_CERT: "${SSL_CERT}"
560   SSL_KEY: "${SSL_KEY}"
561   GROUP_ENTRIES: "${GROUP_ENTRIES}"
562   SECRET_NAME_CERTS: "${SECRET_NAME_CERTS}"
563
564`for var in \`env | sort -u | sed -e '/\(^[Xx][Pp][Rr][Aa]_[A-Za-z0-9_][A-Za-z0-9_]*\)\(=\)\(..*\)/!d' \
565                                -e 's/=.*$//'\`
566do
567   eval echo '\ \ \ '"${var}"': \"''$'"${var}"'\"'
568done`
569   
570`if [ "${IDM_DOMAIN}" != '' ]
571then
572echo '   IDM_DOMAIN: "'"${IDM_DOMAIN}"'"'
573echo '   USE_OTP_PW: "'"${USE_OTP_PW}"'"'
574echo '   NAMESPACE: "'"${NAMESPACE}"'"'
575fi`
576---
577apiVersion:  apps/v1
578kind: Deployment
579metadata:
580  name: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
581  namespace: ingress-${NAMESPACE}
582  labels:
583    app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
584spec:
585  replicas: ${REPLICAS}
586  selector:
587    matchLabels:
588      app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
589  template:
590    metadata:
591      labels:
592        app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
593    spec:
594      containers:
595      - name:  ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443}
596        image: "${IMAGE_XPRA_PROXY:-${IMAGE_DEFAULT}}"
597        securityContext:
598          capabilities:
599             add: ["NET_ADMIN", "SYS_TIME","CAP_SYS_ADMIN","SYS_ADMIN"]
600        command: ["/bin/bash","-c" ]
601        args: ["${XPRA_STARTUP_PROXY}"]
602        envFrom:
603        - configMapRef:
604            name: xpra-env
605        lifecycle:
606          preStop:
607            exec:
608              # SIGTERM triggers a quick exit; gracefully terminate instead
609              command: ["/bin/bash", "-c", "${PRE_STOP_CMD}"]
610        livenessProbe:
611            initialDelaySeconds: 90
612            periodSeconds: 10
613            timeoutSeconds: 20
614            failureThreshold: 5
615            exec:
616               command:
617               - "${LIVENESS_PROBE_CMD}"
618        volumeMounts:
619`nfs_generate_volume_mounts`
620        - mountPath: /sys/fs/cgroup
621          name: sys-fs-cgroup
622          readOnly: true
623        - mountPath: /dev/shm
624          name: dshm
625`if [ "${IDM_DOMAIN}" != '' ]
626then
627echo '        - mountPath: /etc/join-idm-'\`echo "${IDM_DOMAIN}"| sed -e 's/\./-/g'\`
628echo '          name: join-idm-'\`echo "${IDM_DOMAIN}"| sed -e 's/\./-/g'\`
629echo '          readOnly: true'
630fi`
631        - mountPath: `(IFS=\:;set -- \`getent passwd root\`; echo ${6})`/.kube
632          name: ${SECRET_NAME_KUBE}
633          readOnly: true
634`if [ "${SECRET_NAME_CERTS}" != '' ]
635then
636echo '        - mountPath: /etc/xpra/ssl/crt'
637echo '          name: "'"${SECRET_NAME_CERTS}-crt"'"'
638echo '          readOnly: true'
639echo '        - mountPath: /etc/xpra/ssl/key'
640echo '          name: "'"${SECRET_NAME_CERTS}-key"'"'
641echo '          readOnly: true'
642fi`
643      volumes:
644`nfs_generate_volumes`
645      - name: sys-fs-cgroup
646        hostPath:
647         path: /sys/fs/cgroup
648         type: Directory
649      - name: dshm
650        emptyDir:
651          medium: Memory
652#######
653`if [ "${IDM_DOMAIN}" != '' ]
654then
655echo '      - name: "'"${SECRET_NAME_IDM}"'"'
656echo '        secret:'
657echo '            secretName: "'"${SECRET_NAME_IDM}"'"'
658echo '            defaultMode: 256'
659fi`
660      - name: "${SECRET_NAME_KUBE}"
661        secret:
662          secretName: "${SECRET_NAME_KUBE}"
663          defaultMode: 256
664`if [ "${SECRET_NAME_CERTS}" != '' ]
665then
666echo '      - name: "'"${SECRET_NAME_CERTS}-key"'"'
667echo '        secret:'
668echo '           secretName: "'"${SECRET_NAME_CERTS}-key"'"'
669echo '           defaultMode: 256'
670echo '      - name: "'"${SECRET_NAME_CERTS}-crt"'"'
671echo '        secret:'
672echo '           secretName: "'"${SECRET_NAME_CERTS}-crt"'"'
673echo '           defaultMode: 292'
674fi`
675`if [ "${XPRA_WORKERS}" != '' -o "\`echo "${XPRA_WORKERS}" | sed -e 's/./\l&/g'\`" != 'all' ]
676then
677echo '      nodeSelector:'
678fi`
679      restartPolicy: Always
680EOB
681) 
682}
683
684
node_labeling() {
 
   # Label every server listed in XPRA_WORKERS so session pods can be pinned
   # to them (see the nodeSelector emitted by generate_xpra_proxy).
   # ${srv} is quoted (SC2086); the word-splitting of ${XPRA_WORKERS} on the
   # 'for' line is intentional -- it is a space-separated inventory list.
   for srv in ${XPRA_WORKERS}
   do
     kubectl label nodes "${srv}" "xpra_run_${NAMESPACE}"'=true'  --overwrite=true
   done
}
692
do_some_hardening() {
 
 # Lock down the shared tree: 755 on ${BASE_WORKING_DIR}, root-only 700 on
 # its sibling directories, and sticky world-writable (1777) scratch,
 # status and socket directories.
 [ "${BASE_WORKING_DIR}" != '' -a -d "${BASE_WORKING_DIR}"/. ] && chmod 755 "${BASE_WORKING_DIR}"/.

 EXCLUDE=`basename ${BASE_WORKING_DIR}`

 for dir in "${BASE_WORKING_DIR}"/../*
 do
   if [ -d "${dir}"/. -a "`basename ${dir}`" != "${EXCLUDE}" ]
   then
       chown root:root "${dir}"/.
       chmod 700 "${dir}"/.
   fi
 done
 if [ "${XPRA_SCRATCH_EXT}" != '' ]
 then
    # Bug fix: the original tested the literal string "{XPRA_SCRATCH_EXT}"
    # (missing '$'), which is always non-empty.
    [ "${XPRA_SCRATCH_EXT}" != '' -a ! -d "`dirname "${XPRA_SCRATCH_EXT}"`"/. ] && mkdir -p "${XPRA_SCRATCH_EXT}"
    chmod 700 "`dirname "${XPRA_SCRATCH_EXT}"`"/.
    chmod 1777 "${XPRA_SCRATCH_EXT}"/.
 fi
 if [ "${XPRA_STATUS_DIR}" != '' ]
 then
    SRC_PTH="${BASE_WORKING_DIR}/`basename ${XPRA_STATUS_DIR}`"
    [ ! -d "${SRC_PTH}" ] && mkdir -p "${SRC_PTH}"
    chmod 1777 "${SRC_PTH}"
 fi
 if [ "${XPRA_SOCKET_DIR}" != '' ]
 then
    SRC_PTH="${BASE_WORKING_DIR}/`basename ${XPRA_SOCKET_DIR}`"
    [ ! -d "${SRC_PTH}" ] && mkdir -p "${SRC_PTH}"
    chmod 1777 "${SRC_PTH}"
 fi
 [ "${SRC_PTH}" != '' ] && unset SRC_PTH
}
727
728generate_xpra_vars_sh () {
729#
# Write (stdout) the ../etc/xpra-vars.sh profile: a '#!/bin/sh' header
# followed by every variable this script defined after the OLDENV marker,
# re-emitted as 'VAR=value ; export VAR'.  OLDENV holds sed delete-commands
# for the pre-existing environment; password-like variables, PIPESTATUS,
# FUNCNAME and '_' are filtered out, and all shell functions are unset in a
# subshell first so 'set' only lists variables.
# NOTE(review): two identical password-filter lines appear in the sed
# program below -- a harmless duplicate.
730# unset all shell functions
731#
732cat <<EOB
733#!/bin/sh
734#
735#--------------------------------------------------------------------------------#
736# Lines below may change depending on your Caas/Openshift/Kubernetes environment #
737# Don't remove the if statement with corresponding fi statement. When script     #
738# is starting up under UID 0 (root) it will switch over to non-root user.        #
739# (${RUNADUSER})                                                                 #
740# Starting up a Pod with be done with the user as specified in the variable      #
741# USERNAME_RUNASUSER. Be sure this user has a ${HOME}/.kube directory containing #
742# a valid config readable file. (Copy of the kubemaster /etcubernetes/admin.conf#
743#                                                                                #
744# Louis Mulder 2020                                                              #
745# Xpra is released under the terms of the GNU GPL v2, or, at your option, any    #
746# later version. See the file COPYING for details.                               #
747#--------------------------------------------------------------------------------#
748#
749EOB
750(
751for fnc in `declare -F | sed 's/.* //'`
752do
753  unset "${fnc}"
754done
755unset fnc
756set | sed -e '{
757                '"${OLDENV}"'
758                /^[A-Za-z0-9_].*[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d
759                /^[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d
760                /^[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d
761                /\/\^/d
762                /^'"'"'/d
763                /^[Pp][Ii][Pp][Ee][Ss][Tt][Aa][Tt][Uu][Ss]=.*/d
764                /^[Ff][Uu][Nn][Cc][Nn][Aa][Mm][Ee]/d
765                /^_=/d
766                s/\(^[A-Za-z_]\)\([0-9A-Za-z_]*\)\(=\)\(.*$\)/\1\2\3\4 ; export \1\2/
767              }'
768) 
769}
770
771gen_namespaces_certs_secrets () {
772#
# Create the two namespaces and publish the SSL key/crt as secrets
# ${SECRET_NAME_PROXY}-key / -crt in both ${NAMESPACE} and
# ingress-${NAMESPACE}, using a throw-away kustomize dir under /tmp.
# kustomize appends a content hash to generated secret names, so the data
# is re-applied under the stable name and the hashed secret deleted.
# NOTE(review): the sed replacements rely on GNU sed's '\n' in the RHS, and
# 'OLDPWD=`pwd`' shadows the shell's own OLDPWD (it works here only because
# a single 'cd' follows) -- confirm before reuse.
773SSLDIR_TMP=/tmp/ssl${$}
774mkdir -p  ${SSLDIR_TMP}
775cp ${XPRA_SERVER_KEY} ${SSLDIR_TMP}/.
776cp ${XPRA_SERVER_CRT} ${SSLDIR_TMP}/.
777
778OLDPWD=`pwd`
779if cd ${SSLDIR_TMP}
780then
781(
782cat <<EOB
783kind: Namespace
784apiVersion: v1
785metadata:
786  name: ingress-${NAMESPACE}
787  labels:
788    name: ingress-${NAMESPACE}
789---
790kind: Namespace
791apiVersion: v1
792metadata:
793  name: ${NAMESPACE}
794  labels:
795    name: ${NAMESPACE}
796EOB
797) | kubectl apply -f -
798#
799cat <<EOF >./kustomization.yaml
800secretGenerator:
801- name: ${SECRET_NAME_PROXY}-key
802  namespace: ${NAMESPACE}
803  files:
804  - `basename ${XPRA_SERVER_KEY}`
805EOF
806kubectl apply -k .
807sec_name="`kubectl -n ${NAMESPACE} get secrets | grep "${SECRET_NAME_PROXY}-key-"| sed -e 's/ *[Oo][Pp].*$//'| head -1`"
808DATA="`kubectl -n ${NAMESPACE} get secrets "${sec_name}" -o yaml`"
809#
810for sec in ${NAMESPACE} ingress-${NAMESPACE}
811do
812   echo "${DATA}" |\
813   sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \
814       -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n   name: '"${SECRET_NAME_PROXY}-key"'\n   namespace: '"${sec}"'\ntype: Opaque\n&/'
815   echo '---'
816done | kubectl apply -f -
817kubectl -n ${NAMESPACE} delete secrets "${sec_name}"
818#
819cat <<EOF >./kustomization.yaml
820secretGenerator:
821- name: ${SECRET_NAME_PROXY}-crt
822  namespace: ${NAMESPACE}
823  files:
824  - `basename ${XPRA_SERVER_CRT}`
825EOF
826kubectl apply -k .
827sec_name="`kubectl -n ${NAMESPACE} get secrets | grep "${SECRET_NAME_PROXY}-crt-"| sed -e 's/ *[Oo][Pp].*$//'| head -1`"
828DATA="`kubectl -n ${NAMESPACE} get secrets "${sec_name}" -o yaml`"
829#
830for sec in ${NAMESPACE} ingress-${NAMESPACE}
831do
832   echo "${DATA}" |\
833   sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \
834       -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n   name: '"${SECRET_NAME_PROXY}-crt"'\n   namespace: '"${sec}"'\ntype: Opaque\n&/'
835   echo '---'
836done | kubectl apply -f -
837kubectl -n ${NAMESPACE} delete secrets "${sec_name}"
838#
839unset DATA sec_name sec
840fi
841[ "${SSLDIR_TMP}" != '' -a -d  "${SSLDIR_TMP}"/. ] && rm -rf  "${SSLDIR_TMP}"
842unset SSLDIR_TMP
843cd ${OLDPWD}
844}
gen_namespaces_kube_config_secrets() {
#
# Publish the kubemaster admin kubeconfig as secret ${SECRET_NAME_KUBE} in
# namespace ingress-${NAMESPACE}, via a throw-away kustomize directory.
# kustomize appends a content hash to the generated secret name, so the
# data is re-applied under the stable name and the hashed secret deleted.
# NOTE: 'exit 1' (not return) aborts the whole script when no config exists.
DIR_TMP=/tmp/kube${$}
mkdir -p  ${DIR_TMP}
KUBE_CONFIG=${KUBE_CONFIG:-/etc/kubernetes/admin.conf}
#
if [ -f ${KUBE_CONFIG} ]
then
   cp "${KUBE_CONFIG}" ${DIR_TMP}/config
else
   echo 'Are you on a kubemaster, no '${KUBE_CONFIG}' found' 1>&2
   [ "${DIR_TMP}" != '' -a -d "${DIR_TMP}" ] && rm -rf "${DIR_TMP}"
   exit 1
fi
# Use a private name for the saved directory: every 'cd' rewrites the
# shell's own OLDPWD, so the original 'OLDPWD=`pwd`' only worked by accident.
SAVED_PWD=`pwd`
if cd ${DIR_TMP}
then
(
cat <<EOB
kind: Namespace
apiVersion: v1
metadata:
  name: ingress-${NAMESPACE}
  labels:
    name: ingress-${NAMESPACE}
---
kind: Namespace
apiVersion: v1
metadata:
  name: ${NAMESPACE}
  labels:
    name: ${NAMESPACE}
EOB
) | kubectl apply -f -
#
cat <<EOF >./kustomization.yaml
secretGenerator:
- name: ${SECRET_NAME_KUBE}
  namespace: ingress-${NAMESPACE}
  files:
  - config
EOF
# (leftover debug copy 'cp ./kustomization.yaml /var/tmp/kust2' removed)
kubectl apply -k .
sec_name="`kubectl -n ingress-${NAMESPACE} get secrets | grep "${SECRET_NAME_KUBE}-" | sed -e 's/ *[Oo][Pp].*$//'| head -1`"
DATA="`kubectl -n ingress-${NAMESPACE} get secrets "${sec_name}" -o yaml`"

sec=ingress-${NAMESPACE}
(
   echo "${DATA}" |\
   sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \
       -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n   name: '"${SECRET_NAME_KUBE}"'\n   namespace: '"${sec}"'\ntype: Opaque\n&/'
   echo '---'
) | kubectl apply -f -
kubectl -n ingress-${NAMESPACE} delete secrets "${sec_name}"
unset DATA sec_name sec
fi
[ "${DIR_TMP}" != '' -a -d  "${DIR_TMP}"/. ] && rm -rf  "${DIR_TMP}"
unset DIR_TMP
cd ${SAVED_PWD}
}
906#
907build_images () {
# Build and push all docker images under ../images to ${XPRA_REGISTRY_SRV}.
# The directory matching *base* is built first (FROM centos:7 prepended to
# its Dockerfile.tmpl); every other directory is then built FROM that base.
# NOTE: 'exit 1' aborts the whole script, not just this function.
908if cd `dirname ${BASE_WORKING_DIR}`/images
909then
910
911 # Find the vdi-base image directory; everything else goes on the build list.
912 VDI_BASE='' ;export VDI_BASE
913 VDI_BUILDLIST='' ;export VDI_BUILDLIST
914
915 for d in *
916 do
917   if [ -d ${d}/. ]
918   then
919    case ${d} in
920    #(
921    *[Bb][Aa][Ss][Ee]* )
922       VDI_BASE="${d}" 
923    ;;
924    #(
925    * )
926      VDI_BUILDLIST="${VDI_BUILDLIST:+${VDI_BUILDLIST} ${d}}"
927      VDI_BUILDLIST="${VDI_BUILDLIST:-${d}}"
928    ;;
929    esac
930   fi
931 done
932
933 if [ "${VDI_BASE}" = '' ]
934 then
935   echo "No base docker build image directory find `pwd` "'(vdi-base ???)' 1>&2
936   exit 1
937 fi
938
939 # Build and push the base image first; the other images build FROM it.
940 if cd ${VDI_BASE}
941 then
942   sed -e '1 i\'"FROM centos:7" < Dockerfile.tmpl > Dockerfile
943   docker build -t ${XPRA_REGISTRY_SRV}/${VDI_BASE} .
944   docker push ${XPRA_REGISTRY_SRV}/${VDI_BASE}
945 fi
946 cd ..
947 for dir in ${VDI_BUILDLIST}
948 do
949 if cd ${dir}
950 then
951  sed -e '1 i\'"FROM ${XPRA_REGISTRY_SRV}/${VDI_BASE}" < Dockerfile.tmpl > Dockerfile
952  docker build -t ${XPRA_REGISTRY_SRV}/${dir} .
953  docker push ${XPRA_REGISTRY_SRV}/${dir}
954  cd ..
955 fi
956 done
957 unset d dir VDI_BUILDLIST VDI_BASE
958else
959 echo "Huh no `dirname ${BASE_WORKING_DIR}`/images directory....." 1>&2
960 exit 1
961fi
962}
963#
# Pre-flight: any 'VAR=... ADJUST' line still in this script means the admin
# must fill in a value first.  The sed filter strips everything after '='
# and drops this grep line's own self-match (which reduces to 'NN:TODO').
964TODO=`grep -n 'ADJUST' < ${PROG} | sed -e 's/=.*$//' -e '/^[0-9][0-9]*:TODO/d'`
965if [ "${TODO}" != '' ]
966then
967 (
968  for item in ${TODO}
969  do
    # Each item is 'LINENO:VARNAME'; split it on ':' for the message.
970    eval `IFS=':' ; set -- ${item} ; echo "LINE=${1};VAR=${2}"`
971    echo 'You need to adjust or give (a) values(s) for parameter '${VAR} at line ${LINE}
972  done
973  ) 1>&2
974  exit 1
975fi
976#
# Main sequence: optionally build the images, publish the secrets, write the
# xpra-vars.sh profile, apply the generated proxy manifest (also saved under
# ../yaml), then harden the shared directories and label the workers.
977[ "${TOBUILD}" = 'yes' ] && build_images
978gen_namespaces_certs_secrets
979gen_namespaces_kube_config_secrets
980generate_xpra_vars_sh > ${BASE_WORKING_DIR}/etc/xpra-vars.sh
981generate_xpra_proxy | tee ${BASE_WORKING_DIR}/../yaml/xpra-proxy-8${PROXY_PORT}.yaml | kubectl apply -f -
982do_some_hardening
983node_labeling
984#