added
library/cloudflare_dns_records.py
#!/usr/bin/python
"""
Ansible module for fetching Cloudflare DNS records.

Fetches the DNS records whose name exactly matches ``record`` inside a single
Cloudflare zone.  Read-only: ``changed`` is always False.  Authentication is
either an API token (recommended) or the legacy email + API key pair.

Return Values:
    records:
        description: List of DNS records matching the query
        type: list
        returned: success
    zone_id:
        description: The Cloudflare zone ID for the specified zone
        type: str
        returned: success
    changed:
        description: Whether any changes were made (always False for this read-only module)
        type: bool
        returned: always
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from ansible.module_utils.basic import AnsibleModule

if TYPE_CHECKING:
    # Only needed for annotations; the runtime import happens inside
    # run_module() so a missing library yields a clean module failure.
    from cloudflare import Cloudflare

DOCUMENTATION = """
---
module: cloudflare_dns_records
short_description: Fetch DNS records from Cloudflare
description:
  - "This module fetches DNS records from Cloudflare for a specific zone and record name."
  - "It supports both API key and API token authentication methods."
options:
  auth_email:
    description:
      - Email associated with Cloudflare account
      - Required when using auth_key authentication
    required: false
    type: str
  auth_key:
    description:
      - API key for Cloudflare
      - Required when using auth_key authentication
    required: false
    type: str
  auth_token:
    description:
      - API token for Cloudflare (recommended over auth_key)
      - Can be used instead of auth_email and auth_key
    required: false
    type: str
  zone_name:
    description:
      - Name of the Cloudflare zone (e.g., example.com)
    required: true
    type: str
  record:
    description:
      - DNS record name to fetch (e.g., subdomain.example.com)
    required: true
    type: str
"""

EXAMPLES = """
# Fetch DNS records using API key
- name: Fetch Cloudflare DNS records with API key
  cloudflare_dns_records:
    auth_email: "user@example.com"
    auth_key: "{{ cloudflare_api_key }}"
    zone_name: "example.com"
    record: "app.example.com"
  register: dns_records

# Fetch DNS records using API token (recommended)
- name: Fetch Cloudflare DNS records with token
  cloudflare_dns_records:
    auth_token: "{{ cloudflare_api_token }}"
    zone_name: "example.com"
    record: "app.example.com"
  register: dns_records

# Access the records
- name: Display records
  debug:
    var: dns_records.records
"""


def get_zone_id(client: Cloudflare, zone_name: str, module: AnsibleModule) -> str:
    """
    Fetch the zone ID for a given zone name from Cloudflare.

    Args:
        client: Cloudflare client instance
        zone_name (str): Name of the zone to look up
        module: AnsibleModule instance for error reporting

    Returns:
        str: The zone ID

    Raises:
        Calls module.fail_json on error (fail_json exits the module process)
    """
    try:
        zone = client.zones.list(name=zone_name)
        if len(zone.result) == 0:
            module.fail_json(msg=f"Specified zone '{zone_name}' was not found")
        return zone.result[0].id
    except Exception as e:
        module.fail_json(msg=f"Error fetching zone ID: {str(e)}")
        # fail_json never returns; the bare raise keeps the declared str
        # return type honest for static analysis (matches cloudflare_ssl.py).
        raise


def fetch_dns_records(client: Cloudflare, zone_id: str, record_name: str, module: AnsibleModule) -> list[dict[str, object]]:
    """
    Fetch DNS records from Cloudflare.

    Args:
        client: Cloudflare client instance
        zone_id (str): The Cloudflare zone ID
        record_name (str): The DNS record name to fetch (exact match)
        module: AnsibleModule instance for error reporting

    Returns:
        list: List of DNS records

    Raises:
        Calls module.fail_json on error (fail_json exits the module process)
    """
    try:
        # "exact" filter so e.g. "app.example.com" does not also match
        # "other-app.example.com".
        records_response = client.dns.records.list(zone_id=zone_id, name={"exact": record_name})
        if records_response is None:
            module.fail_json(msg="No response from Cloudflare API")

        records = records_response.to_dict()
        results = records.get("result", [])
        if not isinstance(results, list):
            module.fail_json(msg="Unexpected response format from Cloudflare API")
        return results
    except Exception as e:
        module.fail_json(msg=f"Error fetching DNS records: {str(e)}")
        # Unreachable — see get_zone_id; present for type-checker consistency.
        raise


def run_module() -> None:
    """
    Main module execution.

    Handles argument parsing, client construction, the zone/record lookups,
    and return value preparation.
    """
    module_args = dict(
        auth_email=dict(type='str', required=False, no_log=False),
        auth_key=dict(type='str', required=False, no_log=True),
        auth_token=dict(type='str', required=False, no_log=True),
        zone_name=dict(type='str', required=True),
        record=dict(type='str', required=True),
    )

    result: dict[str, bool | str | list[dict[str, object]]] = {
        'changed': False,
        'records': [],
        'zone_id': '',
    }

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_one_of=[
            ['auth_token', 'auth_key']
        ],
        required_together=[
            ['auth_email', 'auth_key']
        ],
    )

    try:
        # Import cloudflare here to provide better error message if not installed
        try:
            from cloudflare import Cloudflare
        except ImportError:
            module.fail_json(msg="The 'cloudflare' Python library is required. Install it with: pip install cloudflare")

        auth_email = module.params.get('auth_email')
        auth_key = module.params.get('auth_key')
        auth_token = module.params.get('auth_token')
        zone_name = module.params['zone_name']
        record = module.params['record']

        # Initialize Cloudflare client — token auth wins when both are given.
        if auth_token:
            cf = Cloudflare(api_token=auth_token)
        else:
            cf = Cloudflare(api_email=auth_email, api_key=auth_key)

        # Fetch zone ID
        zone_id = get_zone_id(cf, zone_name, module)
        result['zone_id'] = zone_id

        # Fetch DNS records
        records = fetch_dns_records(cf, zone_id, record, module)
        result['records'] = records

        module.exit_json(**result)

    except Exception as e:
        module.fail_json(msg=f"Unexpected error: {str(e)}")


def main() -> None:
    """
    Module entry point.
    """
    run_module()


if __name__ == '__main__':
    main()
added
library/cloudflare_ssl.py
#!/usr/bin/python
"""
Ansible module for retrieving Cloudflare SSL/TLS encryption mode.

This module provides functionality to get the SSL/TLS encryption mode
for a specific Cloudflare zone. It can automatically parse domain names
to extract the zone using the tld library.

Return Values:
    ssl_mode:
        description: The SSL/TLS encryption mode for the zone
        type: str
        returned: success
        sample: 'full'
    zone_id:
        description: The Cloudflare zone ID for the specified zone
        type: str
        returned: success
    zone_name:
        description: The zone name that was queried
        type: str
        returned: success
    changed:
        description: Whether any changes were made (always False for this read-only module)
        type: bool
        returned: always
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from ansible.module_utils.basic import AnsibleModule

if TYPE_CHECKING:
    # Annotation-only import; the runtime import is deferred to run_module()
    # so a missing library surfaces as a clean fail_json message.
    from cloudflare import Cloudflare

DOCUMENTATION = """
---
module: cloudflare_ssl
short_description: Retrieve Cloudflare SSL/TLS encryption mode for a zone
description:
  - "This module retrieves the SSL/TLS encryption mode for a specific Cloudflare zone."
  - "It supports both API key and API token authentication methods."
  - "Can automatically extract the zone from a domain using the tld library."
options:
  auth_email:
    description:
      - Email associated with Cloudflare account
      - Required when using auth_key authentication
    required: false
    type: str
  auth_key:
    description:
      - API key for Cloudflare
      - Required when using auth_key authentication
    required: false
    type: str
  auth_token:
    description:
      - API token for Cloudflare (recommended over auth_key)
      - Can be used instead of auth_email and auth_key
    required: false
    type: str
  domain:
    description:
      - Domain name to parse and extract zone from (e.g., subdomain.example.com)
      - Mutually exclusive with zone_name
    required: false
    type: str
  zone_name:
    description:
      - Name of the Cloudflare zone (e.g., example.com)
      - Mutually exclusive with domain
    required: false
    type: str
"""

EXAMPLES = """
# Get SSL/TLS mode using domain (automatically extracts zone)
- name: Get Cloudflare SSL mode with domain
  cloudflare_ssl:
    auth_email: "user@example.com"
    auth_key: "{{ cloudflare_api_key }}"
    domain: "{{ user.domain }}"
  register: ssl_mode

# Get SSL/TLS mode using explicit zone name
- name: Get Cloudflare SSL mode with zone
  cloudflare_ssl:
    auth_token: "{{ cloudflare_api_token }}"
    zone_name: "example.com"
  register: ssl_mode

# Display the SSL mode
- name: Display SSL mode
  debug:
    msg: "SSL/TLS mode: {{ ssl_mode.ssl_mode }}"
"""


def get_zone_id(client: Cloudflare, zone_name: str, module: AnsibleModule) -> str:
    """
    Fetch the zone ID for a given zone name from Cloudflare.

    Args:
        client: Cloudflare client instance
        zone_name (str): Name of the zone to look up
        module: AnsibleModule instance for error reporting

    Returns:
        str: The zone ID

    Raises:
        Calls module.fail_json on error (fail_json exits the module process)
    """
    try:
        zone = client.zones.list(name=zone_name)
        if len(zone.result) == 0:
            module.fail_json(msg=f"Specified zone '{zone_name}' was not found")
        return zone.result[0].id
    except Exception as e:
        module.fail_json(msg=f"Error fetching zone ID: {str(e)}")
        raise  # Unreachable - Pylance silencer


def get_ssl_tls_mode(client: Cloudflare, zone_id: str, module: AnsibleModule) -> str:
    """
    Get the SSL/TLS settings for a zone.

    Args:
        client: Cloudflare client instance
        zone_id (str): The Cloudflare zone ID
        module: AnsibleModule instance for error reporting

    Returns:
        str: The SSL/TLS mode value

    Raises:
        Calls module.fail_json on error (fail_json exits the module process)
    """
    try:
        ssl_response = client.zones.settings.get(setting_id='ssl', zone_id=zone_id)
        if ssl_response is None:
            module.fail_json(msg="No response from Cloudflare API")

        ssl_settings = ssl_response.to_dict()
        ssl_mode = ssl_settings.get('value')

        if ssl_mode is None:
            module.fail_json(msg="SSL/TLS mode value not found in API response")

        return str(ssl_mode)
    except Exception as e:
        module.fail_json(msg=f"Error fetching SSL/TLS settings: {str(e)}")
        raise  # Unreachable - Pylance silencer


def run_module() -> None:
    """
    Main module execution.

    This function handles the module's argument parsing, execution flow,
    and return value preparation.
    """
    module_args = dict(
        auth_email=dict(type='str', required=False, no_log=False),
        auth_key=dict(type='str', required=False, no_log=True),
        auth_token=dict(type='str', required=False, no_log=True),
        domain=dict(type='str', required=False),
        zone_name=dict(type='str', required=False),
    )

    result = dict(
        changed=False,
        ssl_mode='',
        zone_id='',
        zone_name='',
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_one_of=[
            ['auth_token', 'auth_key'],
            ['domain', 'zone_name']
        ],
        required_together=[
            ['auth_email', 'auth_key']
        ],
        mutually_exclusive=[
            ['domain', 'zone_name']
        ],
    )

    try:
        # Import cloudflare here to provide better error message if not installed
        try:
            from cloudflare import Cloudflare
        except ImportError:
            module.fail_json(msg="The 'cloudflare' Python library is required. Install it with: pip install cloudflare")

        auth_email = module.params.get('auth_email')
        auth_key = module.params.get('auth_key')
        auth_token = module.params.get('auth_token')
        domain = module.params.get('domain')
        zone_name = module.params.get('zone_name')

        # Parse domain to get zone name if domain is provided.
        # get_tld needs a URL, hence the "http://" prefix; 'fld' is the
        # registered (first-level) domain, e.g. "example.com".
        if domain:
            try:
                from tld import get_tld
                res = get_tld(f"http://{domain}", as_object=True)
                zone_name = getattr(res, 'fld', None)
                if not zone_name:
                    module.fail_json(msg=f"Failed to extract zone name from domain '{domain}'")
            except ImportError:
                module.fail_json(msg="The 'tld' Python library is required for domain parsing. Install it with: pip install tld")
            except Exception as e:
                module.fail_json(msg=f"Failed to parse domain '{domain}': {str(e)}")

        # Ensure zone_name is set
        if not zone_name:
            module.fail_json(msg="Zone name could not be determined from provided parameters")

        # Initialize Cloudflare client — token auth takes precedence.
        if auth_token:
            cf = Cloudflare(api_token=auth_token)
        else:
            cf = Cloudflare(api_email=auth_email, api_key=auth_key)

        # Fetch zone ID
        zone_id = get_zone_id(cf, zone_name, module)
        result['zone_id'] = zone_id
        result['zone_name'] = zone_name

        # Fetch SSL/TLS mode
        ssl_mode = get_ssl_tls_mode(cf, zone_id, module)
        result['ssl_mode'] = ssl_mode

        module.exit_json(**result)

    except Exception as e:
        module.fail_json(msg=f"Unexpected error: {str(e)}")


def main() -> None:
    """
    Module entry point.
    """
    run_module()


if __name__ == '__main__':
    main()
added
library/ip_timezone_lookup.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

DOCUMENTATION = '''
---
module: ip_timezone_lookup
short_description: Fetch timezone based on IP address from multiple sources
description:
  - Fetches timezone information from multiple IP geolocation services
  - Returns a consensus timezone when multiple sources agree
  - Provides individual results from each source for verification
  - Returns only valid IANA timezone identifiers suitable for timedatectl
version_added: "2.9"
author: "Custom Module"
options:
  ip_address:
    description:
      - IP address to lookup timezone for
    required: true
    type: str
  timeout:
    description:
      - Timeout in seconds for each API request
    required: false
    default: 5
    type: int
  min_consensus:
    description:
      - Minimum number of sources that must agree for consensus
    required: false
    default: 2
    type: int
'''

EXAMPLES = '''
- name: Get timezone for specific IP
  ip_timezone_lookup:
    ip_address: "8.8.8.8"
  register: tz_result

- name: Set system timezone based on IP location
  command: timedatectl set-timezone {{ tz_result.timezone }}
  when: tz_result.confidence == 'high'

- name: Display consensus timezone
  debug:
    msg: "Consensus timezone: {{ tz_result.timezone }}"

- name: Display all source results
  debug:
    msg: "{{ tz_result.sources }}"
'''

RETURN = '''
timezone:
  description: The consensus timezone in IANA format (suitable for timedatectl)
  type: str
  returned: always
  sample: "Europe/Helsinki"
confidence:
  description: Confidence level of the result (high/medium/low, or 'none' when no source answered)
  type: str
  returned: always
  sample: "high"
consensus_count:
  description: Number of sources agreeing on the timezone
  type: int
  returned: always
  sample: 8
total_sources:
  description: Total number of sources queried
  type: int
  returned: always
  sample: 8
successful_lookups:
  description: Number of successful API calls returning valid timezones
  type: int
  returned: always
  sample: 8
sources:
  description: Dictionary containing results from each source
  type: dict
  returned: always
  sample: {
    "ipapi": {"timezone": "Europe/Helsinki", "success": true},
    "ipinfo": {"timezone": "Europe/Helsinki", "success": true}
  }
ip_used:
  description: The IP address that was looked up
  type: str
  returned: always
  sample: "8.8.8.8"
'''

import asyncio
import json
from collections import Counter

import aiohttp

from ansible.module_utils.basic import AnsibleModule


class IPTimezoneLookup:
    """Query several IP-geolocation APIs concurrently and pick a consensus timezone."""

    def __init__(self, module):
        self.module = module
        self.ip_address = module.params['ip_address']
        self.timeout = module.params['timeout']
        self.min_consensus = module.params['min_consensus']
        # Per-source result dicts, filled in by _run_lookups_async().
        self.results = {}

    async def make_request(self, session, url, headers=None):
        """Make async HTTP request; return parsed JSON or None on any failure."""
        try:
            async with session.get(url, headers=headers, timeout=aiohttp.ClientTimeout(total=self.timeout)) as response:
                if response.status == 200:
                    return await response.json()
                return None
        except (aiohttp.ClientError, asyncio.TimeoutError, json.JSONDecodeError):
            # Network errors, timeouts and malformed bodies are all treated as
            # "this source did not answer" — consensus handles the rest.
            return None

    async def fetch_ipapi(self, session):
        """Fetch from ip-api.com (free, reliable)"""
        url = f"http://ip-api.com/json/{self.ip_address}"
        data = await self.make_request(session, url)
        if data and data.get('status') == 'success':
            return data.get('timezone')
        return None

    async def fetch_ipinfo(self, session):
        """Fetch from ipinfo.io (free tier, reliable)"""
        url = f"https://ipinfo.io/{self.ip_address}/json"
        data = await self.make_request(session, url)
        if data:
            return data.get('timezone')
        return None

    async def fetch_ipapi_co(self, session):
        """Fetch from ipapi.co (reliable)"""
        url = f"https://ipapi.co/{self.ip_address}/json/"
        data = await self.make_request(session, url)
        if data and not data.get('error'):
            return data.get('timezone')
        return None

    async def fetch_freegeoip(self, session):
        """Fetch from freegeoip.app (reliable)"""
        url = f"https://freegeoip.app/json/{self.ip_address}"
        data = await self.make_request(session, url)
        if data:
            return data.get('time_zone')
        return None

    async def fetch_ipwhois(self, session):
        """Fetch from ipwhois.app (reliable)"""
        url = f"https://ipwhois.app/json/{self.ip_address}"
        data = await self.make_request(session, url)
        # The API reports failures as success: false (identity check, not != False).
        if data and data.get('success') is not False:
            return data.get('timezone')
        return None

    async def fetch_geojs(self, session):
        """Fetch from geojs.io (reliable)"""
        url = f"https://get.geojs.io/v1/ip/geo/{self.ip_address}.json"
        data = await self.make_request(session, url)
        if data:
            return data.get('timezone')
        return None

    async def fetch_ipregistry(self, session):
        """Fetch from ipregistry.co (reliable with tryout key)"""
        url = f"https://api.ipregistry.co/{self.ip_address}?key=tryout"
        data = await self.make_request(session, url)
        if data:
            tz_info = data.get('time_zone')
            if tz_info:
                return tz_info.get('id')
        return None

    async def fetch_ipapi_is(self, session):
        """Fetch from ipapi.is (reliable)"""
        url = f"https://api.ipapi.is/?q={self.ip_address}"
        data = await self.make_request(session, url)
        if data:
            location = data.get('location')
            if location:
                return location.get('timezone')
        return None

    async def _fetch_from_source(self, session, source_name, lookup_func):
        """Fetch timezone from a single source with error handling."""
        try:
            timezone = await lookup_func(session)
            # A '/' is a cheap sanity check for IANA "Area/Location" identifiers;
            # rejects abbreviations like "EET" that timedatectl would refuse.
            if timezone and '/' in timezone:
                return source_name, {
                    'timezone': timezone,
                    'success': True
                }
            return source_name, {
                'timezone': None,
                'success': False,
                'error': 'No valid IANA timezone returned'
            }
        except Exception as e:
            return source_name, {
                'timezone': None,
                'success': False,
                'error': str(e)
            }

    async def _run_lookups_async(self):
        """Run all timezone lookups concurrently."""
        # Only include sources that returned valid results in testing
        lookup_methods = {
            'ipapi': self.fetch_ipapi,
            'ipinfo': self.fetch_ipinfo,
            'ipapi_co': self.fetch_ipapi_co,
            'freegeoip': self.fetch_freegeoip,
            'ipwhois': self.fetch_ipwhois,
            'geojs': self.fetch_geojs,
            'ipregistry': self.fetch_ipregistry,
            'ipapi_is': self.fetch_ipapi_is,
        }

        # One shared session; all sources queried in parallel.
        async with aiohttp.ClientSession() as session:
            tasks = [
                self._fetch_from_source(session, source_name, lookup_func)
                for source_name, lookup_func in lookup_methods.items()
            ]
            results = await asyncio.gather(*tasks)

        for source_name, result in results:
            self.results[source_name] = result

    def run_lookups(self):
        """Run all timezone lookups (synchronous wrapper for async operations)."""
        asyncio.run(self._run_lookups_async())

    def determine_consensus(self):
        """Determine the consensus timezone.

        Returns:
            tuple: (timezone or None, confidence 'high'/'medium'/'low'/'none',
            number of sources agreeing on the winning timezone)
        """
        timezones = [
            result['timezone']
            for result in self.results.values()
            if result['success'] and result['timezone']
        ]

        if not timezones:
            return None, 'none', 0

        # Most frequent timezone wins.
        tz_counter = Counter(timezones)
        consensus_tz, consensus_count = tz_counter.most_common(1)[0]

        # Confidence: must meet min_consensus, then scaled by the share of
        # agreeing sources (>=70% high, >=50% medium, otherwise low).
        total_valid = len(timezones)
        if consensus_count >= self.min_consensus:
            if consensus_count >= total_valid * 0.7:
                confidence = 'high'
            elif consensus_count >= total_valid * 0.5:
                confidence = 'medium'
            else:
                confidence = 'low'
        else:
            confidence = 'low'

        return consensus_tz, confidence, consensus_count


def main():
    """Module entry point: run lookups and exit with the consensus result."""
    module = AnsibleModule(
        argument_spec=dict(
            ip_address=dict(type='str', required=True),
            timeout=dict(type='int', default=5),
            min_consensus=dict(type='int', default=2)
        ),
        supports_check_mode=True
    )

    # Read-only module, but check mode still skips the network round-trips.
    if module.check_mode:
        module.exit_json(changed=False)

    lookup = IPTimezoneLookup(module)
    lookup.run_lookups()

    consensus_tz, confidence, consensus_count = lookup.determine_consensus()

    successful_lookups = sum(1 for r in lookup.results.values() if r['success'])

    result = {
        'changed': False,
        'timezone': consensus_tz,
        'confidence': confidence,
        'consensus_count': consensus_count,
        'total_sources': len(lookup.results),
        'successful_lookups': successful_lookups,
        'sources': lookup.results,
        'ip_used': lookup.ip_address
    }

    if consensus_tz:
        module.exit_json(**result)
    else:
        module.fail_json(msg="Could not determine timezone from any source", **result)


if __name__ == '__main__':
    main()
added
library/tld_parse.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

#########################################################################
# Title:         Saltbox: Library | TLD Parse                           #
# Author(s):     salty                                                  #
# URL:           https://github.com/saltyorg/Saltbox                    #
# --                                                                    #
#########################################################################
#                   GNU General Public License v3.0                     #
#########################################################################

from __future__ import annotations

from typing import TYPE_CHECKING, Any

from ansible.module_utils.basic import AnsibleModule
from tld import get_tld

if TYPE_CHECKING:
    from tld.utils import Result

DOCUMENTATION = r'''
---
module: tld_parse
short_description: Parse domain names for DNS operations
description:
  - Parses a domain name into components needed for DNS record management
  - Extracts the full domain and subdomain portions
  - Uses the tld Python library for parsing
options:
  url:
    description:
      - The domain or URL to parse
    required: true
    type: str
  record:
    description:
      - Optional DNS record to prepend to the domain
    required: false
    type: str
    default: ''
author:
  - Saltbox Team
'''

EXAMPLES = r'''
- name: Parse domain
  tld_parse:
    url: "{{ user.domain }}"
  register: domain_info

- name: Parse domain with record
  tld_parse:
    url: "{{ user.domain }}"
    record: "subdomain"
  register: domain_info

- name: Use parsed values
  debug:
    msg: "Domain: {{ domain_info.domain }}, Record: {{ domain_info.record }}"
'''

RETURN = r'''
fld:
  description: Full domain name (e.g., example.com)
  type: str
  returned: always
  sample: 'example.com'
subdomain:
  description: Subdomain portion (empty string if none)
  type: str
  returned: always
  sample: 'www'
record:
  description: DNS record format (subdomain or '@' for root domain)
  type: str
  returned: always
  sample: 'www'
tld:
  description: Top-level domain (e.g., com, org, co.uk)
  type: str
  returned: always
  sample: 'com'
domain:
  description: Domain name without TLD (e.g., example)
  type: str
  returned: always
  sample: 'example'
'''


def main() -> None:
    """Parse the supplied domain/URL and return its components for DNS use."""
    module: Any = AnsibleModule(
        argument_spec=dict(
            url=dict(type='str', required=True),
            record=dict(type='str', default='')
        ),
        supports_check_mode=True
    )

    url: str = module.params['url']
    record: str = module.params['record']

    try:
        # Drop any scheme the caller supplied so composing with `record`
        # below cannot produce "http://record.http://example.com".
        bare: str
        if url.startswith(('http://', 'https://')):
            bare = url.split('://', 1)[1]
        else:
            bare = url

        # tld.get_tld() requires a URL, so always provide a scheme.
        full_url: str
        if record:
            full_url = f"http://{record}.{bare}"
        else:
            full_url = f"http://{bare}"

        # Parse using tld library
        res: Result = get_tld(full_url, as_object=True)  # type: ignore[assignment]

        # Extract components - use same naming as tld library
        fld: str = res.fld
        subdomain: str = res.subdomain if res.subdomain else ''
        tld: str = res.tld
        domain: str = res.domain

        # '@' is the conventional DNS notation for the zone apex.
        dns_record: str = subdomain if subdomain else '@'

        module.exit_json(
            changed=False,
            fld=fld,
            subdomain=subdomain,
            record=dns_record,
            tld=tld,
            domain=domain
        )

    except Exception as e:
        module.fail_json(msg=f"Failed to parse domain: {e!s}")


if __name__ == '__main__':
    main()
added
lookup_plugins/docker_var.py
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError
from ansible.utils.display import Display
from typing import Any, List, Optional, Dict
import json

display = Display()

DOCUMENTATION = """
name: docker_var
author: salty
version_added: "N/A"
short_description: Look up a role variable with automatic fallback and JSON conversion
description:
  - This lookup replicates lookup('vars', _instance_name + suffix, default=lookup('vars', _var_prefix + '_role' + suffix))
  - For instance names or var prefixes with dashes, checks both original and underscore-converted versions
  - Automatically converts lists of JSON strings to dictionaries when detected
options:
  _terms:
    description: The suffix to append (e.g. '_docker_network_mode')
    required: true
  default:
    description: The default value to return if neither variable is found
    type: raw
    required: false
  convert_json:
    description: Whether to automatically convert JSON string lists to dictionaries (default true)
    type: bool
    required: false
    default: true
"""


class LookupModule(LookupBase):
    """Instance-aware variable lookup with role-level fallback."""

    def _is_json_string_list(self, value: Any) -> bool:
        """Check if value is a non-empty list of JSON-object-looking strings."""
        if not isinstance(value, list):
            return False

        # Every item must be a string shaped like a JSON object; a cheap
        # "{...}" check avoids parsing items that can't possibly qualify.
        for item in value:
            if not isinstance(item, str):
                return False
            stripped = item.strip()
            if not (stripped.startswith('{') and stripped.endswith('}')):
                return False

        return len(value) > 0

    def _convert_json_list_to_dict(self, json_list: List[str]) -> Optional[Dict[str, Any]]:
        """Convert a list of JSON strings to a combined dictionary.

        Returns None (with a warning) if any item fails to parse or parses
        to a non-dict, so the caller can fall back to the original list.
        """
        combined_dict: Dict[str, Any] = {}

        for json_str in json_list:
            try:
                # Parse the JSON string directly
                parsed = json.loads(json_str)
                if isinstance(parsed, dict):
                    combined_dict.update(parsed)
                else:
                    display.warning(f"[docker_var] JSON string parsed to non-dict: {parsed}")
                    return None
            except json.JSONDecodeError as je:
                display.warning(f"[docker_var] Invalid JSON in: {json_str[:100]}... Error: {je}")
                return None
            except (TypeError, AttributeError) as e:
                display.warning(f"[docker_var] Failed to process JSON string: {e}")
                return None

        display.vvv(f"[docker_var] Converted JSON list to dict with {len(combined_dict)} keys using manual parsing")
        return combined_dict

    def run(self, terms: List[str], variables: Optional[Dict[str, Any]] = None, **kwargs: Any) -> List[Any]:
        """Resolve '<instance><suffix>' with fallback to '<prefix>_role<suffix>'.

        Raises:
            AnsibleLookupError: when required context variables are missing, or
                when no candidate variable resolves and no default is given.
        """
        if variables is None:
            variables = {}

        suffix: str = terms[0] if terms else ''
        self.set_options(var_options=variables, direct=kwargs)
        default: Any = self.get_option('default')
        convert_json: Optional[bool] = self.get_option('convert_json')
        if convert_json is None:
            convert_json = True

        self._templar.available_variables = variables

        # Raise AnsibleLookupError (not KeyError) so Ansible reports a clean
        # lookup failure instead of an unhandled traceback — consistent with
        # the not-found error at the bottom of this method.
        if '_var_prefix' not in variables:
            raise AnsibleLookupError("[docker_var] Required variable '_var_prefix' not found")
        if '_instance_name' not in variables:
            raise AnsibleLookupError("[docker_var] Required variable '_instance_name' not found")

        var_prefix: str = self._templar.template(variables['_var_prefix'], fail_on_undefined=True)
        instance_name: str = self._templar.template(variables['_instance_name'], fail_on_undefined=True)

        # Create lists of prefixes to try (including dash/underscore variants)
        instance_names_to_try: List[str] = [instance_name]
        if '-' in instance_name:
            underscore_instance: str = instance_name.replace('-', '_')
            instance_names_to_try.append(underscore_instance)
            display.vvv(f"[docker_var] Added underscore variant for instance: {underscore_instance}")

        var_prefixes_to_try: List[str] = [var_prefix]
        if '-' in var_prefix:
            underscore_prefix: str = var_prefix.replace('-', '_')
            var_prefixes_to_try.append(underscore_prefix)
            display.vvv(f"[docker_var] Added underscore variant for prefix: {underscore_prefix}")

        # Build the variable names to check: instance-specific names first,
        # then the role-level fallbacks.
        vars_to_check: List[str] = []

        for inst_name in instance_names_to_try:
            primary_var: str = inst_name + suffix
            vars_to_check.append(primary_var)

        for var_pref in var_prefixes_to_try:
            # '_name' has no '_role' fallback tier; everything else does.
            fallback_var: str
            if suffix == '_name':
                fallback_var = var_pref + suffix
            else:
                fallback_var = var_pref + '_role' + suffix
            vars_to_check.append(fallback_var)

        display.vvv(f"[docker_var] Checking these keys in order: {vars_to_check}")
        if display.verbosity >= 3:
            debug_keys = sorted([
                k for k in variables
                if suffix in k or k.endswith(suffix) or any(k.startswith(prefix) for prefix in instance_names_to_try + var_prefixes_to_try)
            ])
            display.vvv(f"[docker_var] Relevant vars: {debug_keys}")

        # Try each variable name in order
        last_error: Optional[Exception] = None
        for var_name in vars_to_check:
            if var_name in variables:
                raw_value = variables.get(var_name)
                if raw_value is None:
                    display.vvv(f"[docker_var] Skipping {var_name} (value is None)")
                    continue
                try:
                    result = self._templar.template(raw_value, fail_on_undefined=True)
                    if result is not None:
                        # Check if we should convert JSON list to dict
                        if convert_json and self._is_json_string_list(result):
                            display.vvv(f"[docker_var] Found JSON string list for {var_name}, converting to dict")
                            converted = self._convert_json_list_to_dict(result)
                            if converted is not None:
                                return [converted]
                            else:
                                display.vvv("[docker_var] Conversion failed, returning original list")

                        display.vvv(f"[docker_var] Returning templated value for {var_name}: {result}")
                        return [result]
                    else:
                        display.vvv(f"[docker_var] {var_name} is None after templating, skipping")
                except Exception as e:
                    # Remember the failure but keep trying lower-priority names.
                    display.vvv(f"[docker_var] Error templating {var_name}: {e}")
                    last_error = e
            else:
                display.vvv(f"[docker_var] {var_name} not found in variables — skipping")

        # If we have a default, use it
        if default is not None:
            display.vvv(f"[docker_var] No usable variable found, returning default: {default}")
            return [default]

        # If all attempts failed with errors, raise the last error
        if last_error is not None:
            raise last_error

        # Otherwise raise an error - variable not found and no default provided
        raise AnsibleLookupError(
            f"[docker_var] Variable not found and no default provided. "
            f"Tried the following variables in order: {', '.join(vars_to_check)}"
        )
added
lookup_plugins/role_var.py
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError
from ansible.utils.display import Display
from typing import Any, List, Optional, Dict
import json

display = Display()

DOCUMENTATION = """
name: role_var
author: salty
version_added: "N/A"
short_description: Look up a role variable with automatic fallback and JSON conversion
description:
  - This lookup replicates lookup('vars', traefik_role_var + suffix, default=lookup('vars', role_name + '_role' + suffix))
  - When 'role' parameter is specified, constructs the appropriate traefik_role_var for that role
  - For _name variables with dashes, checks both original and underscore-converted versions
  - Automatically converts lists of JSON strings to dictionaries when detected
options:
  _terms:
    description: The suffix to append (e.g. '_dns_record')
    required: true
  default:
    description: The default value to return if neither variable is found
    type: raw
    required: false
  role:
    description: The role name to use for lookup instead of the current role_name
    type: str
    required: false
  convert_json:
    description: Whether to automatically convert JSON string lists to dictionaries (default true)
    type: bool
    required: false
    default: true
"""


class LookupModule(LookupBase):
    """Resolve '<traefik_role_var><suffix>' with fallback to '<role_name>_role<suffix>'."""

    def _is_json_string_list(self, value: Any) -> bool:
        """Return True if value is a non-empty list of JSON-object-looking strings."""
        if not isinstance(value, list):
            return False

        # Every item must be a string shaped like a JSON object ('{...}').
        for item in value:
            if not isinstance(item, str):
                return False
            stripped = item.strip()
            if not (stripped.startswith('{') and stripped.endswith('}')):
                return False

        return len(value) > 0

    def _convert_json_list_to_dict(self, json_list: List[str]) -> Optional[Dict[str, Any]]:
        """Parse each JSON string and merge the dicts; return None on any failure.

        Later entries overwrite earlier keys (dict.update semantics). A single
        bad entry aborts the whole conversion so the caller can fall back to
        returning the original list unchanged.
        """
        combined_dict: Dict[str, Any] = {}

        for json_str in json_list:
            try:
                # Parse the JSON string directly
                parsed = json.loads(json_str)
                if isinstance(parsed, dict):
                    combined_dict.update(parsed)
                else:
                    display.warning(f"[role_var] JSON string parsed to non-dict: {parsed}")
                    return None
            except json.JSONDecodeError as je:
                display.warning(f"[role_var] Invalid JSON in: {json_str[:100]}... Error: {je}")
                return None
            except (TypeError, AttributeError) as e:
                display.warning(f"[role_var] Failed to process JSON string: {e}")
                return None

        display.vvv(f"[role_var] Converted JSON list to dict with {len(combined_dict)} keys using manual parsing")
        return combined_dict

    def run(self, terms: List[str], variables: Optional[Dict[str, Any]] = None, **kwargs: Any) -> List[Any]:
        """Resolve the role variable for the given suffix.

        Checks '<traefik_role_var><suffix>' first, then the fallback
        ('<role_name><suffix>' for the special '_name' suffix, otherwise
        '<role_name>_role<suffix>'), including dash->underscore variants.
        Returns the first usable (non-None) templated value, the 'default'
        option if nothing matched, and otherwise raises AnsibleLookupError.
        """
        if variables is None:
            variables = {}

        suffix: str = terms[0] if terms else ''
        self.set_options(var_options=variables, direct=kwargs)
        default: Any = self.get_option('default')
        specified_role: Optional[str] = self.get_option('role')
        convert_json: Optional[bool] = self.get_option('convert_json')
        if convert_json is None:
            convert_json = True

        self._templar.available_variables = variables

        # Use specified role if provided, otherwise fall back to role_name
        if specified_role:
            role_name: str = self._templar.template(specified_role, fail_on_undefined=True)
        else:
            if 'role_name' not in variables:
                # FIX: raise AnsibleLookupError (not a bare KeyError) so the
                # failure is reported cleanly instead of as a traceback.
                raise AnsibleLookupError("[role_var] Required variable 'role_name' not found")
            role_name = self._templar.template(variables['role_name'], fail_on_undefined=True)

        # If a custom role is specified, we need to construct the appropriate traefik_role_var for that role
        traefik_role_var: str
        if specified_role:
            # Replicate the logic: traefik_role_var: "{{ lookup('vars', role_name + '_name', default=role_name) }}"
            custom_role_name_var: str = role_name + '_name'
            if custom_role_name_var in variables:
                traefik_role_var = self._templar.template(variables[custom_role_name_var], fail_on_undefined=True)
            else:
                traefik_role_var = role_name
            display.vvv(f"[role_var] Using custom traefik_role_var for role '{role_name}': {traefik_role_var}")
        else:
            if 'traefik_role_var' not in variables:
                # FIX: same error-type correction as above.
                raise AnsibleLookupError("[role_var] Required variable 'traefik_role_var' not found")
            traefik_role_var = self._templar.template(variables['traefik_role_var'], fail_on_undefined=True)

        # Build the variable names to check
        primary_var: str
        fallback_var: str
        if suffix == '_name':
            primary_var = traefik_role_var + suffix
            fallback_var = role_name + suffix
        else:
            primary_var = traefik_role_var + suffix
            fallback_var = role_name + '_role' + suffix

        # Create list of all variable names to check (including dash/underscore variants)
        vars_to_check: List[str] = []

        for var_name in [primary_var, fallback_var]:
            vars_to_check.append(var_name)
            # If the variable name contains dashes, also check the underscore version
            if '-' in var_name:
                underscore_var = var_name.replace('-', '_')
                vars_to_check.append(underscore_var)
                display.vvv(f"[role_var] Added underscore variant: {underscore_var} for {var_name}")

        display.vvv(f"[role_var] Checking these keys in order: {vars_to_check}")
        if display.verbosity >= 3:
            debug_keys = sorted([
                k for k in variables
                if suffix in k or k.endswith(suffix) or k.startswith((traefik_role_var, role_name))
            ])
            display.vvv(f"[role_var] Relevant vars: {debug_keys}")

        # Try each variable name in order
        last_error: Optional[Exception] = None
        for var_name in vars_to_check:
            if var_name in variables:
                raw_value = variables.get(var_name)
                if raw_value is None:
                    display.vvv(f"[role_var] Skipping {var_name} (value is None)")
                    continue
                try:
                    result = self._templar.template(raw_value, fail_on_undefined=True)
                    if result is not None:
                        # Check if we should convert JSON list to dict
                        if convert_json and self._is_json_string_list(result):
                            display.vvv(f"[role_var] Found JSON string list for {var_name}, converting to dict")
                            converted = self._convert_json_list_to_dict(result)
                            if converted is not None:
                                return [converted]
                            else:
                                display.vvv("[role_var] Conversion failed, returning original list")

                        display.vvv(f"[role_var] Returning templated value for {var_name}: {result}")
                        return [result]
                    else:
                        display.vvv(f"[role_var] {var_name} is None after templating, skipping")
                except Exception as e:
                    display.vvv(f"[role_var] Error templating {var_name}: {e}")
                    last_error = e
            else:
                display.vvv(f"[role_var] {var_name} not found in variables — skipping")

        # If we have a default, use it
        if default is not None:
            display.vvv(f"[role_var] No usable variable found, returning default: {default}")
            return [default]

        # If all attempts failed with errors, raise the last error
        if last_error is not None:
            raise last_error

        # Otherwise raise an error - variable not found and no default provided
        raise AnsibleLookupError(
            f"[role_var] Variable not found and no default provided. "
            f"Tried the following variables in order: {', '.join(vars_to_check)}"
        )
added
resources/tasks/instances/nzbget.yml
#########################################################################
# Title: Saltbox: Resources | Tasks | Instances | Get NZBGet Info       #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
- name: Resources | Tasks | Instances | Get Info | Check if NZBGet exists
  ansible.builtin.stat:
    path: "{{ lookup('role_var', '_paths_config_location', role='nzbget') }}"
  register: nzbget_role_paths_config_location_stat

- name: Resources | Tasks | Instances | Get Info | NZBGet tasks
  when: nzbget_role_paths_config_location_stat.stat.exists
  block:
    - name: Resources | Tasks | Instances | Get Info | Fetch NZBGet ControlUsername
      ansible.builtin.shell: "grep -oP '^ControlUsername=\\K.*' {{ lookup('role_var', '_paths_config_location', role='nzbget') }}"
      register: nzbget_username_result
      changed_when: false

    - name: Resources | Tasks | Instances | Get Info | Fetch NZBGet ControlPassword
      ansible.builtin.shell: "grep -oP '^ControlPassword=\\K.*' {{ lookup('role_var', '_paths_config_location', role='nzbget') }}"
      register: nzbget_password_result
      changed_when: false
      # FIX: the registered stdout is the clear-text password; keep it out of logs.
      no_log: true

    - name: Resources | Tasks | Instances | Get Info | Set 'nzbget_info' variable (installed)
      ansible.builtin.set_fact:
        nzbget_info:
          name: nzbget
          url: "{{ lookup('role_var', '_web_url', role='nzbget') }}"
          username: "{{ nzbget_username_result.stdout }}"
          password: "{{ nzbget_password_result.stdout }}"
      # FIX: the fact embeds the password; suppress task output.
      no_log: true

# FIX: renamed so the two set_fact tasks no longer share an identical name.
- name: Resources | Tasks | Instances | Get Info | Set 'nzbget_info' variable (not installed)
  ansible.builtin.set_fact:
    nzbget_info:
      name: nzbget
      url: "{{ lookup('role_var', '_web_url', role='nzbget') }}"
      username: "not installed"
      password: "not installed"
  when: (not nzbget_role_paths_config_location_stat.stat.exists)
added
resources/tasks/instances/sabnzbd.yml
#########################################################################
# Title: Saltbox: Resources | Tasks | Instances | Get SABnzbd Info      #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
- name: Resources | Tasks | Instances | Get Info | Check if SABnzbd exists
  ansible.builtin.stat:
    path: "{{ lookup('role_var', '_paths_config_location', role='sabnzbd') }}"
  register: sabnzbd_role_paths_config_location_stat

- name: Resources | Tasks | Instances | Get Info | SABnzbd API Key tasks
  when: sabnzbd_role_paths_config_location_stat.stat.exists
  block:
    - name: Resources | Tasks | Instances | Get Info | Fetch SABnzbd API Key
      ansible.builtin.shell: "grep -oP '^api_key = \\K.*' {{ lookup('role_var', '_paths_config_location', role='sabnzbd') }}"
      register: sabnzbd_api_key_result
      changed_when: false
      # FIX: the registered stdout is the clear-text API key; keep it out of logs.
      no_log: true

    - name: Resources | Tasks | Instances | Get Info | Set 'sabnzbd_info' variable (installed)
      ansible.builtin.set_fact:
        sabnzbd_info:
          name: sabnzbd
          url: "{{ lookup('role_var', '_web_url', role='sabnzbd') }}"
          api_key: "{{ sabnzbd_api_key_result.stdout }}"
      # FIX: the fact embeds the API key; suppress task output.
      no_log: true

# FIX: renamed so the two set_fact tasks no longer share an identical name.
- name: Resources | Tasks | Instances | Get Info | Set 'sabnzbd_info' variable (not installed)
  ansible.builtin.set_fact:
    sabnzbd_info:
      name: sabnzbd
      url: "{{ lookup('role_var', '_web_url', role='sabnzbd') }}"
      api_key: "not installed"
  when: (not sabnzbd_role_paths_config_location_stat.stat.exists)
added
resources/tasks/variables/import_inventory_vars.yml
#####################################################################################
# Title: Saltbox: Resources | Tasks | Variables | Import Inventory Vars             #
# Author(s): salty                                                                  #
# URL: https://github.com/saltyorg/Saltbox                                          #
# --                                                                                #
#####################################################################################
# GNU General Public License v3.0                                                   #
#####################################################################################
---
# Load the user's host_vars overrides, but only when the file is present so a
# fresh install without an inventory file does not fail.
- name: Resources | Tasks | Variables | Import Inventory Vars | Check if 'localhost.yml' exists
  ansible.builtin.stat:
    path: /srv/git/saltbox/inventories/host_vars/localhost.yml
  register: localhost_inventory_file

- name: Resources | Tasks | Variables | Import Inventory Vars | Import Inventory variables
  ansible.builtin.include_vars: /srv/git/saltbox/inventories/host_vars/localhost.yml
  when: localhost_inventory_file.stat.exists
added
resources/tasks/variables/import_role_vars.yml
################################################################################
# Title: Saltbox: Resources | Tasks | Variables | Import Role Vars             #
# Author(s): salty                                                             #
# URL: https://github.com/saltyorg/Saltbox                                     #
# --                                                                           #
################################################################################
# GNU General Public License v3.0                                              #
################################################################################
---
# Load a role's defaults first, then layer the user's host_vars overrides on
# top (only when the inventory file actually exists).
- name: Resources | Tasks | Variables | Import Role Vars | Include {{ import_role_name }} role default vars
  ansible.builtin.include_vars: /srv/git/saltbox/roles/{{ import_role_name }}/defaults/main.yml

- name: Resources | Tasks | Variables | Import Role Vars | Check if 'localhost.yml' exists
  ansible.builtin.stat:
    path: /srv/git/saltbox/inventories/host_vars/localhost.yml
  register: localhost_inventory_file

- name: Resources | Tasks | Variables | Import Role Vars | Import Inventory variables
  ansible.builtin.include_vars: /srv/git/saltbox/inventories/host_vars/localhost.yml
  when: localhost_inventory_file.stat.exists
added
roles/cloudplow_disable/tasks/main.yml
#########################################################################
# Title: Saltbox: Cloudplow Disable Role                                #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
# Removes both the legacy-named and the current Cloudplow systemd units via
# the shared delete_service.yml task file (which reads _service_name).
- name: Delete Legacy Service
  ansible.builtin.include_tasks: "{{ resources_tasks_path }}/systemd/delete_service.yml"
  vars:
    _service_name: "{{ cloudplow_service_name_old }}"

- name: Delete Service
  ansible.builtin.include_tasks: "{{ resources_tasks_path }}/systemd/delete_service.yml"
  vars:
    _service_name: "{{ cloudplow_service_name }}"
added
roles/motd/tasks/subtasks/emby_info.yml
#########################################################################
# Title: Saltbox: MOTD | Build Emby Info                                #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
# Merge this instance's entry (keyed by emby_name) into the accumulated
# emby_info dict; the token is always the placeholder 'not installed' here.
- name: Set 'emby_info' variable
  ansible.builtin.set_fact:
    emby_info: "{{ emby_info | default({}) | combine({emby_name: _emby_entry}) }}"
  vars:
    _emby_entry:
      name: "{{ emby_name }}"
      url: "{{ lookup('role_var', '_web_url', role=emby_name) }}"
      token: "not installed"
added
roles/motd/tasks/subtasks/jellyfin_info.yml
#########################################################################
# Title: Saltbox: MOTD | Build Jellyfin Info                            #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
# Merge this instance's entry (keyed by jellyfin_name) into the accumulated
# jellyfin_info dict; the token is always the placeholder 'not installed' here.
- name: Set 'jellyfin_info' variable
  ansible.builtin.set_fact:
    jellyfin_info: "{{ jellyfin_info | default({}) | combine({jellyfin_name: _jellyfin_entry}) }}"
  vars:
    _jellyfin_entry:
      name: "{{ jellyfin_name }}"
      url: "{{ lookup('role_var', '_web_url', role=jellyfin_name) }}"
      token: "not installed"
added
roles/motd/tasks/subtasks/qbittorrent_info.yml
#########################################################################
# Title: Saltbox: MOTD | Build qBittorrent Info                         #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
# Merge this instance's entry (keyed by qbittorrent_name) into the accumulated
# qbittorrent_info dict, including the Saltbox user's credentials.
- name: Set 'qbittorrent_info' variable
  ansible.builtin.set_fact:
    qbittorrent_info: "{{ qbittorrent_info | default({}) | combine({qbittorrent_name: {'name': qbittorrent_name, 'url': lookup('role_var', '_web_url', role=qbittorrent_name), 'username': user.name, 'password': user.pass}}) }}"
  # FIX: the fact embeds user.pass; suppress task output so the password
  # does not land in logs/verbose output.
  no_log: true
added
roles/postgres_host/defaults/main.yml
##########################################################################
# Title: Saltbox: PostgreSQL | Default Variables                         #
# Author(s): salty                                                       #
# URL: https://github.com/saltyorg/Saltbox                               #
# --                                                                     #
##########################################################################
# GNU General Public License v3.0                                        #
##########################################################################
---
################################
# Basics
################################

# Supports any versions from https://wiki.postgresql.org/wiki/Apt
# Each version is unique and you cannot specify the same version twice
postgres_host_role_versions: ["17"]

################################
# Paths
################################

# Parent directory holding one config/data tree per installed version.
postgres_host_role_data_directory: "{{ server_appdata_path }}/postgresql"

################################
# Superuser
################################

# NOTE(review): shipped default credentials — presumably meant to be
# overridden in the user's inventory; confirm and do not keep the default
# password in production.
postgres_host_role_create_root_superuser: true
postgres_host_role_root_superuser_name: "root"
postgres_host_role_root_superuser_password: "password4321"

################################
# Per-Version Configuration
################################

# Optional per-version overrides; any key absent for a version falls back
# to the role-wide defaults further below.
# Example:
# postgres_host_role_config:
#   "16":
#     allowed_hosts:
#       - "172.19.0.0/16"
#       - "10.0.0.0/8"
#     auth_method: "md5"
#     users:
#       - name: "app_user"
#         password: "password1"
#       - name: "app_user2"
#         password: "password2"
#     databases:
#       - name: "app_database"
#         users:
#           - "app_user"
#           - "app_user2"
#       - name: "metrics_db"
#         users:
#           - "app_user"
#   "17":
#     allowed_hosts:
#       - "172.19.0.0/16"
#     auth_method: "scram-sha-256"
#     users:
#       - name: "app_user3"
#         password: "password3"
#     databases:
#       - name: "new_app_database"
#         users:
#           - "app_user3"
postgres_host_role_config: {}

################################
# Access Control
################################

# CIDRs granted access via pg_hba.conf for every managed cluster.
postgres_host_role_allowed_hosts:
  - "172.19.0.0/16"

postgres_host_role_auth_method: "scram-sha-256"

################################
# User & Database Configuration
################################

postgres_host_role_users:
  - name: "{{ user.name }}"
    password: "{{ user.pass }}"

postgres_host_role_databases:
  - name: "saltbox"
    users: ["{{ user.name }}"]
added
roles/postgres_host/tasks/main.yml
#########################################################################
# Title: Saltbox: PostgreSQL Host Role                                  #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
- name: Update apt cache
  ansible.builtin.apt:
    update_cache: true

- name: Install postgresql-common
  ansible.builtin.apt:
    name: postgresql-common
    state: latest

- name: Run PGDG script
  ansible.builtin.command: /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
  args:
    creates: /etc/apt/sources.list.d/pgdg.list

- name: Create PostgreSQL data directory
  ansible.builtin.file:
    path: "{{ postgres_host_role_data_directory }}"
    state: directory
    owner: "{{ user.name }}"
    group: "{{ user.name }}"
    mode: '0755'

- name: Create symlink for entire PostgreSQL directory
  ansible.builtin.file:
    src: "{{ postgres_host_role_data_directory }}"
    dest: "/etc/postgresql"
    state: link
    owner: "{{ user.name }}"
    group: "{{ user.name }}"
    follow: false
    force: true

- name: Install PostgreSQL packages for specified versions
  ansible.builtin.apt:
    name: "postgresql-{{ item }}"
    state: latest
  loop: "{{ postgres_host_role_versions }}"

# FIX: moved up — this fact is referenced by 'Update port in postgresql.conf'
# and 'Create clusters' below, but was previously computed only after those
# tasks, so they hit an undefined 'postgres_port_assignments' at runtime.
# Ports are 5432, 5433, ... in the order versions are listed.
- name: Calculate desired port assignments based on version order
  ansible.builtin.set_fact:
    postgres_port_assignments: "{{ postgres_port_assignments | default({}) | combine({item: (5432 + my_idx)}) }}"
  loop: "{{ postgres_host_role_versions }}"
  loop_control:
    index_var: my_idx

- name: Discover existing PostgreSQL clusters
  ansible.builtin.shell: |
    pg_lsclusters --no-header | awk '{print $1":"$2":"$3}'
  register: existing_clusters
  changed_when: false
  failed_when: false

- name: Parse cluster information
  ansible.builtin.set_fact:
    postgres_cluster_info: "{{ postgres_cluster_info | default([]) + [{'version': item.split(':')[0], 'name': item.split(':')[1], 'port': item.split(':')[2]}] }}"
  loop: "{{ existing_clusters.stdout_lines | default([]) }}"
  when: (existing_clusters.stdout_lines is defined)

- name: Find all PostgreSQL services
  ansible.builtin.shell: |
    systemctl list-units --type=service --state=loaded --no-legend | grep postgresql | awk '{print $1}'
  register: postgres_services
  changed_when: false
  failed_when: false

- name: Stop and disable unspecified PostgreSQL versions
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: stopped
    enabled: false
  loop: "{{ postgres_services.stdout_lines }}"
  when:
    - postgres_services.stdout_lines is defined
    - item != "postgresql.service"
    - item not in (postgres_host_role_versions | map('regex_replace', '^(.*)$', 'postgresql@\\1-main.service') | list)
  failed_when: false

- name: Check if default clusters exist
  ansible.builtin.stat:
    path: "/var/lib/postgresql/{{ item }}/main"
  register: default_clusters_exist
  loop: "{{ postgres_host_role_versions }}"

- name: Stop default clusters if they exist
  ansible.builtin.command: >
    pg_ctlcluster {{ item.item }} main stop
  loop: "{{ default_clusters_exist.results }}"
  become: true
  become_user: "{{ user.name }}"
  when: item.stat.exists
  failed_when: false

- name: Remove default clusters if they exist
  ansible.builtin.command: >
    pg_dropcluster {{ item.item }} main
  loop: "{{ default_clusters_exist.results }}"
  become: true
  become_user: "{{ user.name }}"
  when: item.stat.exists
  failed_when: false

- name: Check if clusters are already configured
  ansible.builtin.shell: |
    pg_lsclusters --no-header | grep "^{{ item }}\s\+main" || echo "not_found"
  register: clusters_configured
  loop: "{{ postgres_host_role_versions }}"
  become: true
  become_user: "{{ user.name }}"
  changed_when: false
  failed_when: false

- name: Update port in postgresql.conf for existing clusters
  ansible.builtin.lineinfile:
    path: "{{ postgres_host_role_data_directory }}/{{ item }}/main/postgresql.conf"
    regexp: "^#?port ="
    line: "port = {{ postgres_port_assignments[item] }}"
    owner: "{{ user.name }}"
    group: "{{ user.name }}"
    mode: '0644'
  loop: "{{ postgres_host_role_versions }}"
  when: postgres_port_assignments[item] is defined

- name: Create clusters with custom data directory and user
  ansible.builtin.command: >
    pg_createcluster {{ item.item }} main
    --datadir={{ postgres_host_role_data_directory }}/{{ item.item }}/data
    --user={{ user.name }}
    --port={{ postgres_port_assignments[item.item] }}
  loop: "{{ clusters_configured.results }}"
  become: true
  become_user: "{{ user.name }}"
  when: ('not_found' in item.stdout)

- name: Create systemd override directory for PostgreSQL service
  ansible.builtin.file:
    path: "/etc/systemd/system/postgresql@.service.d"
    state: directory
    owner: root
    group: root
    mode: '0755'

- name: Create systemd override for PostgreSQL service
  ansible.builtin.copy:
    content: |
      [Service]
      User={{ user.name }}
      Group={{ user.name }}

      # Ensure runtime directory exists and has correct ownership
      ExecStartPre=+/bin/mkdir -p /var/run/postgresql
      ExecStartPre=+/bin/chown -R {{ user.name }}:{{ user.name }} /var/run/postgresql
      ExecStartPre=+/bin/chmod 0775 /var/run/postgresql

      # Ensure log directory exists and has correct ownership
      # FIX: was mkdir /var/run/postgresql (copy-paste) while chown targeted
      # /var/log/postgresql — create the log directory it actually chowns.
      ExecStartPre=+/bin/mkdir -p /var/log/postgresql
      ExecStartPre=+/bin/chown -R {{ user.name }}:{{ user.name }} /var/log/postgresql
    dest: "/etc/systemd/system/postgresql@.service.d/override.conf"
    owner: root
    group: root
    mode: '0644'
  register: postgres_systemd_override

- name: Reload systemd daemon after override changes
  ansible.builtin.systemd:
    daemon_reload: true
  when: postgres_systemd_override.changed

- name: Stop all PostgreSQL services before port validation
  ansible.builtin.systemd:
    name: "postgresql@{{ item }}-main"
    state: stopped
  loop: "{{ postgres_host_role_versions }}"
  failed_when: false

- name: Stop generic PostgreSQL service
  ansible.builtin.systemd:
    name: postgresql
    state: stopped
  failed_when: false

- name: Check if desired ports are available
  ansible.builtin.wait_for:
    port: "{{ postgres_port_assignments[item] }}"
    host: "127.0.0.1"
    state: stopped
    timeout: 1
  loop: "{{ postgres_host_role_versions }}"
  register: port_check_results
  # FIX: 'failed_when: false' rewrote each result's .failed to False, so the
  # follow-up fail task below could never trigger. ignore_errors preserves
  # the per-item failed flag while still letting the play continue.
  ignore_errors: true

- name: Fail if any desired ports are not available
  ansible.builtin.fail:
    msg: "Port {{ postgres_port_assignments[item] }} for PostgreSQL version {{ item }} is not available (in use by another service)"
  loop: "{{ postgres_host_role_versions }}"
  loop_control:
    index_var: port_idx
  when: port_check_results.results[port_idx].failed | default(false)

- name: Start PostgreSQL services with correct port assignments
  ansible.builtin.systemd:
    name: "postgresql@{{ item }}-main"
    state: started
    enabled: true
  loop: "{{ postgres_host_role_versions }}"

- name: Ensure generic PostgreSQL service is enabled
  ansible.builtin.systemd:
    name: postgresql
    enabled: true

- name: Rediscover PostgreSQL clusters after setup
  ansible.builtin.shell: |
    pg_lsclusters --no-header | awk '{print $1":"$2":"$3":"$4}'
  register: updated_clusters
  changed_when: false
  failed_when: false

- name: Parse updated cluster information
  ansible.builtin.set_fact:
    postgres_active_clusters: "{{ postgres_active_clusters | default([]) + [{'version': item.split(':')[0], 'name': item.split(':')[1], 'port': item.split(':')[2], 'status': item.split(':')[3]}] }}"
  loop: "{{ updated_clusters.stdout_lines | default([]) }}"
  when:
    - (updated_clusters.stdout_lines is defined)
    - (item.split(':')[0] in postgres_host_role_versions)

- name: Setup databases and users for each PostgreSQL version
  ansible.builtin.include_tasks: setup_version.yml
  vars:
    postgres_version: "{{ postgres_item }}"
    cluster_port: "{{ postgres_port_assignments[postgres_item] }}"
  loop: "{{ postgres_host_role_versions | unique }}"
  loop_control:
    loop_var: "postgres_item"
added
roles/postgres_host/tasks/setup_version.yml
#########################################################################
# Title: Saltbox: PostgreSQL Host Role                                  #
# Author(s): salty                                                      #
# URL: https://github.com/saltyorg/Saltbox                              #
# --                                                                    #
#########################################################################
# GNU General Public License v3.0                                       #
#########################################################################
---
# Resolve per-version settings: anything not overridden in
# postgres_host_role_config[<version>] falls back to the role-wide defaults.
- name: Set version-specific configuration for version {{ postgres_version }}
  ansible.builtin.set_fact:
    version_users: "{{ postgres_host_role_config[postgres_version].users | default(postgres_host_role_users) }}"
    version_databases: "{{ postgres_host_role_config[postgres_version].databases | default(postgres_host_role_databases) }}"
    version_allowed_hosts: "{{ postgres_host_role_config[postgres_version].allowed_hosts | default(postgres_host_role_allowed_hosts) }}"
    version_auth_method: "{{ postgres_host_role_config[postgres_version].auth_method | default(postgres_host_role_auth_method) }}"

# The managed block is replaced wholesale on every run, so removed hosts
# disappear from pg_hba.conf as well.
- name: Add allowed hosts to pg_hba.conf for version {{ postgres_version }}
  ansible.builtin.blockinfile:
    path: "{{ postgres_host_role_data_directory }}/{{ postgres_version }}/main/pg_hba.conf"
    marker: "### SALTBOX MANAGED BLOCK - {mark} ###"
    block: |
      {% for host in version_allowed_hosts %}
      # Allow connections from {{ host }}
      host    all             all             {{ "%-18s" | format(host) }} {{ version_auth_method }}
      {% endfor %}
    owner: "{{ user.name }}"
    group: "{{ user.name }}"
    mode: '0640'

# A database named after the OS user allows passwordless local psql logins
# (peer auth) to work out of the box.
- name: Create default user database for version {{ postgres_version }}
  community.postgresql.postgresql_db:
    name: "{{ user.name }}"
    state: present
    login_port: "{{ cluster_port }}"
    login_user: "{{ user.name }}"
  become: true
  become_user: "{{ user.name }}"

- name: Create root superuser for version {{ postgres_version }}
  community.postgresql.postgresql_user:
    name: "{{ postgres_host_role_root_superuser_name }}"
    password: "{{ postgres_host_role_root_superuser_password }}"
    role_attr_flags: SUPERUSER,CREATEDB,CREATEROLE
    state: present
    login_port: "{{ cluster_port }}"
    login_user: "{{ user.name }}"
  become: true
  become_user: "{{ user.name }}"
  # Credentials in module args — keep them out of logs.
  no_log: true
  when: (postgres_host_role_create_root_superuser | bool)

- name: Create PostgreSQL users for version {{ postgres_version }}
  community.postgresql.postgresql_user:
    name: "{{ item.name }}"
    password: "{{ item.password }}"
    state: present
    login_port: "{{ cluster_port }}"
    login_user: "{{ user.name }}"
  loop: "{{ version_users }}"
  become: true
  become_user: "{{ user.name }}"
  # Credentials in module args — keep them out of logs.
  no_log: true
  when: (version_users | length > 0)

- name: Create PostgreSQL databases for version {{ postgres_version }}
  community.postgresql.postgresql_db:
    name: "{{ item.name }}"
    state: present
    login_port: "{{ cluster_port }}"
    login_user: "{{ user.name }}"
  loop: "{{ version_databases }}"
  become: true
  become_user: "{{ user.name }}"
  when: (version_databases | length > 0)

# subelements pairs each database with each entry of its 'users' list
# (skip_missing tolerates databases that declare no users at all).
- name: Grant database privileges to users for version {{ postgres_version }}
  community.postgresql.postgresql_privs:
    login_db: "{{ item.0.name }}"
    roles: "{{ item.1 }}"
    privs: ALL
    type: database
    state: present
    login_port: "{{ cluster_port }}"
    login_user: "{{ user.name }}"
  loop: "{{ version_databases | subelements('users', skip_missing=True) }}"
  become: true
  become_user: "{{ user.name }}"
  when: (version_databases | length > 0)
added
roles/pre_tasks/defaults/main.yml
##########################################################################
# Title: Saltbox: pre_tasks | Default Variables                          #
# Author(s): salty                                                       #
# URL: https://github.com/saltyorg/Saltbox                               #
# --                                                                     #
##########################################################################
# GNU General Public License v3.0                                        #
##########################################################################
---
# Location of the user's saltbox_mod checkout under the app-data path.
pre_tasks_saltbox_mod_location: "{{ server_appdata_path }}/saltbox_mod"
added
roles/traefik_template/templates/docker-compose.yml.j2
---
{# Compose template scaffold for new Traefik-fronted services.
   service_gluetun_enabled.user_input comes from an interactive prompt task;
   when truthy the container is attached to the Gluetun network namespace
   instead of the shared 'saltbox' network. Jinja comments ({# ... #}) do not
   appear in the rendered file. #}
services:
  {{ traefik_template_name }}:
    container_name: {{ traefik_template_name }}
    environment: # Change this as needed for your image
      PUID: "{{ uid }}"
      PGID: "{{ gid }}"
      TZ: "{{ tz }}"
    hostname: {{ traefik_template_name }}
    image: your_image:your_tag
    labels:
      {% for key, value in docker_labels_common | dictsort %}
      {{ key }}: {{ value }}
      {% endfor %}
    {% if ((service_gluetun_enabled.user_input | lower) | bool) %}
    network_mode: {{ traefik_template_role_docker_network_mode }}
    {% else %}
    networks:
      - saltbox
    {% endif %}
    restart: unless-stopped
    volumes: # Change this as needed for your image
      - {{ server_appdata_path }}/{{ traefik_template_name }}:/config
      - /etc/localtime:/etc/localtime:ro

{# The external network declaration is only needed when not using Gluetun. #}
{% if not ((service_gluetun_enabled.user_input | lower) | bool) %}
networks:
  saltbox:
    external: true
{% endif %}
added
scripts/saltbox-defaults-linter.py
+ #!/usr/bin/env python3+ """+ Saltbox Role Defaults Linter+ Enforces Saltbox formatting rules for role defaults/main.yml files+ + Rules:+ 1. Operator Alignment: | and + operators must align with first character after "{{+ - Standard: All operators align with base position (first char after "{{ )+ - Exception: When 'else' is followed by content on a new line that continues,+ subsequent operators within that else branch align with content after 'else '+ - Context resets when if/else blocks close (marked by )) or )))+ + 2. If/Else Alignment: if and else keywords must align vertically within {{ }} brackets+ """+ + import re+ import sys+ from pathlib import Path+ from typing import List+ + + class LintError:+ """Represents a single linting error"""+ + def __init__(self, file: str, line: int, message: str, repo_url: str = None, commit_sha: str = None):+ self.file = file+ self.line = line+ self.message = message+ self.repo_url = repo_url+ self.commit_sha = commit_sha+ + def to_github_annotation(self) -> str:+ """+ Format error as GitHub Actions annotation+ + Includes a clickable link to the file at the specific commit in the message,+ since GitHub's default annotation links only point to the commit page+ (which doesn't show the file if it wasn't modified in that commit).+ """+ # Build GitHub blob URL if we have repo and commit info+ if self.repo_url and self.commit_sha:+ # Format: https://github.com/owner/repo/blob/commit_sha/path/to/file.yml#L123+ github_link = f"{self.repo_url}/blob/{self.commit_sha}/{self.file}#L{self.line}"+ message_with_link = f"{self.message} - {github_link}"+ else:+ message_with_link = self.message+ + return f"::error file={self.file},line={self.line}::{message_with_link}"+ + def __str__(self) -> str:+ return f"{self.file}:{self.line} - {self.message}"+ + + class DefaultsLinter:+ """Lints a single defaults/main.yml file"""+ + def __init__(self, file_path: Path, repo_url: str = None, commit_sha: str = None):+ self.file_path = file_path+ 
self.repo_url = repo_url+ self.commit_sha = commit_sha+ self.lines = file_path.read_text().splitlines()+ self.errors: List[LintError] = []+ + def check_operator_alignment(self):+ """+ Rule 1: Check | and + operators align with first character after '{{ '+ + Standard alignment - all operators at base position:+ sonarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='sonarr')+ | combine(lookup('role_var', '_docker_envs_custom', role='sonarr')) }}"+ ^ Aligns with 'l' in lookup (position after "{{ )+ + Multiple operators - all at same base position:+ wikijs_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='wikijs')+ + lookup('role_var', '_docker_networks_custom', role='wikijs') }}"+ ^ All + align with 'd' in docker_networks_common+ + Exception - else with continuing content creates new context:+ dozzle_role_docker_commands: "{{ lookup('role_var', '_docker_commands_agent', role='dozzle')+ + lookup('role_var', '_docker_commands_default', role='dozzle')+ if lookup('role_var', '_agent_mode', role='dozzle')+ else lookup('role_var', '_docker_commands_default', role='dozzle')+ + lookup('role_var', '_docker_commands_custom', role='dozzle') }}"+ ^ This + aligns with 'l' in lookup after 'else ', not with base+ + Inline if/else (no context change):+ traefik_role_docker_labels: "{{ docker_labels_saltbox+ | combine((lookup('role_var', '_docker_labels_http', role='traefik')+ if traefik_http+ else lookup('role_var', '_docker_labels_dns', role='traefik')))+ | combine(lookup('role_var', '_docker_labels_custom', role='traefik')) }}"+ ^ All | remain at base position - inline if/else doesn't change context+ """+ i = 0+ while i < len(self.lines):+ curr_line = self.lines[i]+ + # Match variable definitions starting multi-line Jinja expressions+ # Pattern: variable_name: "{{ <content>+ match = re.match(r'^([a-z_]+): "{{ (.+)', curr_line)+ + # Only process if:+ # 1. Line matches pattern+ # 2. 
Line doesn't end with }} (multi-line expression)+ if match and '}}' not in curr_line:+ var_name = match.group(1)+ + # This is the start of a multi-line expression+ # Track the expected alignment position for continuation lines+ expected_alignment = len(curr_line.split('"{{')[0] + '"{{ ')+ + # Now check all continuation lines until we hit }}+ j = i + 1+ # Track alignment context - changes when we encounter 'else' with content+ # and resets when if/else blocks close+ current_alignment = expected_alignment+ in_else_context = False+ + while j < len(self.lines):+ continuation_line = self.lines[j]+ + # Check if this line closes an if/else block (ends with )) or }))+ # This resets alignment context back to base+ if in_else_context and re.search(r'\)\)(?:\)|$)', continuation_line.rstrip()):+ current_alignment = expected_alignment+ in_else_context = False+ + # Check if this line starts with 'else' followed by content that continues+ # This creates a new alignment context for subsequent operators+ # Pattern: else followed by lookup/variable (not just closing like '])')+ # BUT: only if the else block doesn't close on the same line+ else_match = re.match(r'^(\s+)else (lookup|[a-z_]+)', continuation_line)+ if else_match:+ # Check if this line also closes the if/else block (contains )))+ # If so, don't set a persistent context+ closes_immediately = re.search(r'\)\)\)', continuation_line)+ + if not closes_immediately:+ # New alignment context: content after 'else '+ # Only persists if the line doesn't close the block+ current_alignment = len(else_match.group(1)) + len('else ')+ in_else_context = True+ + # Check if this line has an operator+ op_match = re.match(r'^(\s+)([|+]) ', continuation_line)+ + if op_match:+ actual_spaces = len(op_match.group(1))+ operator = op_match.group(2)+ + # Use current alignment context (changes after 'else', resets after closing parens)+ expected_spaces = current_alignment+ + if actual_spaces != expected_spaces:+ diff = actual_spaces - 
expected_spaces+ + # Get relative path for GitHub annotation+ try:+ relative_path = str(self.file_path.relative_to(Path.cwd()))+ except ValueError:+ relative_path = str(self.file_path)+ + self.errors.append(LintError(+ file=relative_path,+ line=j + 1, # Convert to 1-indexed+ message=f"[operator-alignment] Variable '{var_name}': Operator '{operator}' at column {actual_spaces}, expected {expected_spaces} (off by {diff:+d})",+ repo_url=self.repo_url,+ commit_sha=self.commit_sha+ ))+ + # Stop if we've reached the end of the Jinja block+ if '}}' in continuation_line:+ break+ + j += 1+ + # Skip ahead past the multi-line block we just processed+ i = j+ + i += 1+ + def check_ifelse_alignment(self):+ """+ Rule 2: Check if/else keywords align within same {{ }} brackets+ + Example:+ variable: "{{ value+ if condition+ else other_value }}"+ ^ if and else must align vertically+ """+ in_multiline_jinja = False+ jinja_lines = []+ jinja_start_line = 0+ + for i, line in enumerate(self.lines, 1):+ # Detect start of multi-line Jinja expression+ if '"{{' in line and '}}' not in line:+ in_multiline_jinja = True+ jinja_lines = [line]+ jinja_start_line = i+ + elif in_multiline_jinja:+ jinja_lines.append(line)+ + # Check if we've reached the end of the Jinja block+ if '}}' in line:+ # Find lines containing 'if' and 'else'+ if_lines = [l for l in jinja_lines if ' if ' in l or l.strip().startswith('if ')]+ else_lines = [l for l in jinja_lines if ' else ' in l or l.strip().startswith('else ')]+ + # Only check if both if and else exist in the same block+ if if_lines and else_lines:+ # Find indentation of 'if' and 'else' keywords+ if_indent = None+ else_indent = None+ + for l in jinja_lines:+ if ' if ' in l:+ # Calculate column position where 'if' appears+ if_indent = len(l) - len(l.lstrip())+ if ' else ' in l or l.strip().startswith('else '):+ else_indent = len(l) - len(l.lstrip())+ + # Check if if and else align+ if if_indent is not None and else_indent is not None:+ if if_indent != 
else_indent:+ diff = else_indent - if_indent+ # Get relative path for GitHub annotation+ try:+ relative_path = str(self.file_path.relative_to(Path.cwd()))+ except ValueError:+ relative_path = str(self.file_path)+ + self.errors.append(LintError(+ file=relative_path,+ line=jinja_start_line,+ message=f"[ifelse-alignment] 'if' at column {if_indent} doesn't align with 'else' at column {else_indent} (off by {diff:+d})",+ repo_url=self.repo_url,+ commit_sha=self.commit_sha+ ))+ + # Reset state+ in_multiline_jinja = False+ jinja_lines = []+ + def lint(self) -> List[LintError]:+ """Run all lint checks and return list of errors"""+ self.check_operator_alignment()+ self.check_ifelse_alignment()+ return self.errors+ + + def main():+ """Main entry point for the linter"""+ import os+ + if len(sys.argv) < 2:+ print("Usage: python3 saltbox-defaults-linter.py <roles_directory>")+ print("\nExample:")+ print(" python3 saltbox-defaults-linter.py roles/")+ sys.exit(1)+ + roles_dir = Path(sys.argv[1])+ + if not roles_dir.exists():+ print(f"Error: Directory '{roles_dir}' does not exist")+ sys.exit(1)+ + if not roles_dir.is_dir():+ print(f"Error: '{roles_dir}' is not a directory")+ sys.exit(1)+ + # Get GitHub repo and commit info from environment variables (set by GitHub Actions)+ # GITHUB_REPOSITORY format: "owner/repo"+ # GITHUB_SHA: commit SHA that triggered the workflow+ github_repo = os.environ.get('GITHUB_REPOSITORY')+ github_sha = os.environ.get('GITHUB_SHA')+ + # Build full repo URL if we have the repository name+ repo_url = f"https://github.com/{github_repo}" if github_repo else None+ + all_errors = []+ files_checked = 0+ + # Find and lint all defaults/main.yml files+ for defaults_file in sorted(roles_dir.glob("*/defaults/main.yml")):+ linter = DefaultsLinter(defaults_file, repo_url=repo_url, commit_sha=github_sha)+ errors = linter.lint()+ all_errors.extend(errors)+ files_checked += 1+ + # Output results+ if all_errors:+ print(f"❌ Found {len(all_errors)} formatting error(s) in 
{files_checked} file(s):\n")+ for error in all_errors:+ print(error.to_github_annotation())+ print(f"\nTotal: {len(all_errors)} error(s)")+ sys.exit(1)+ else:+ print(f"✅ All {files_checked} role defaults files pass formatting checks")+ sys.exit(0)+ + + if __name__ == "__main__":+ main()
removed
resources/roles/dns/files/fetch_cloudflare_records.py
- import argparse- import cloudflare- from cloudflare import Cloudflare- import json- import sys- import datetime- from typing import List, cast- - - class CustomJSONEncoder(json.JSONEncoder):- def default(self, obj):- if isinstance(obj, datetime.datetime):- return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')- return json.JSONEncoder.default(self, obj)- - - def fetch_dns_records(client: Cloudflare, zone_id: str, record_name: str) -> list:- try:- records = client.dns.records.list(zone_id=zone_id, name=record_name).to_dict()- results = cast(List[dict], records["result"])- return results- except cloudflare.APIConnectionError as e:- print(f'Error fetching DNS records: {e}', file=sys.stderr)- raise- except Exception as e:- print(f'Unexpected error: {e}', file=sys.stderr)- raise- - - def get_zone_id(client: Cloudflare, zone_name: str) -> str:- try:- zone = client.zones.list(name=zone_name)- if len(zone.result) == 0:- raise ValueError(f'Specified zone: {zone_name} was not found')- return zone.result[0].id- except cloudflare.APIConnectionError as e:- print(f'Error fetching zone ID: {e}', file=sys.stderr)- raise- except Exception as e:- print(f'Unexpected error: {e}', file=sys.stderr)- raise- - - def main():- parser = argparse.ArgumentParser(- prog='Saltbox Cloudflare Helper',- description='Parses Cloudflare Zone Records',- epilog='')- - parser.add_argument('--auth_key', required=True, help='API key for Cloudflare')- parser.add_argument('--auth_email', required=True, help='Email associated with Cloudflare account')- parser.add_argument('--zone_name', required=True, help='Name of the Cloudflare zone')- parser.add_argument('--record', required=True, help='DNS record to fetch')- - args = parser.parse_args()- - cf = Cloudflare(api_email=args.auth_email, api_key=args.auth_key)- - try:- zone_id = get_zone_id(cf, args.zone_name)- print(json.dumps(fetch_dns_records(cf, zone_id, args.record), cls=CustomJSONEncoder))- except ValueError as e:- print(f'Error: {e}', file=sys.stderr)- exit(1)- 
except Exception as e:- print(f'Failed to fetch DNS records: {e}', file=sys.stderr)- exit(1)- - exit(0)- - - if __name__ == '__main__':- main()
removed
resources/roles/dns/files/requirements.txt
- requests==2.32.5- cloudflare==4.3.1
removed
resources/roles/dns/tasks/cloudflare/subtasks/checksum.yml
- #########################################################################- # Title: Saltbox: DNS | Cloudflare | Checksum #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Cloudflare | Checksum | Get 'fetch_cloudflare_records.py' checksum- ansible.builtin.stat:- path: "{{ resources_path }}/roles/dns/files/fetch_cloudflare_records.py"- checksum_algorithm: sha256- register: repo_fetch_cloudflare_records_py_checksum- - - name: Cloudflare | Checksum | Get 'requirements.txt' checksum- ansible.builtin.stat:- path: "{{ resources_path }}/roles/dns/files/requirements.txt"- checksum_algorithm: sha256- register: repo_requirements_txt_checksum- - - name: Cloudflare | Checksum | Get installed 'fetch_cloudflare_records.py' checksum- ansible.builtin.stat:- path: "{{ cloudflare_path }}/fetch_cloudflare_records.py"- checksum_algorithm: sha256- register: installed_fetch_cloudflare_records_py_checksum- - - name: Cloudflare | Checksum | Get installed 'requirements.txt' checksum- ansible.builtin.stat:- path: "{{ cloudflare_path }}/requirements.txt"- checksum_algorithm: sha256- register: installed_requirements_txt_checksum- - - name: Cloudflare | Checksum | Check for updates- ansible.builtin.set_fact:- cloudflare_reinstall: true- when: (not installed_fetch_cloudflare_records_py_checksum.stat.exists) or- (not installed_requirements_txt_checksum.stat.exists) or- (repo_fetch_cloudflare_records_py_checksum.stat.checksum != installed_fetch_cloudflare_records_py_checksum.stat.checksum) or- (repo_requirements_txt_checksum.stat.checksum != installed_requirements_txt_checksum.stat.checksum)
removed
resources/roles/dns/tasks/cloudflare/subtasks/python.yml
- #########################################################################- # Title: Saltbox: DNS | Cloudflare | Python #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: "Cloudflare | Python | Verify Python {{ cloudflare_python_version }} installation"- ansible.builtin.command: "{{ python_bin }} python find {{ cloudflare_python_version }} --managed-python"- register: cloudflare_uv_python_path- changed_when: false- failed_when: false- environment: "{{ python_environment }}"- become: true- become_user: "{{ user.name }}"- - - name: "Cloudflare | Python | Check if Python needs upgrading"- ansible.builtin.set_fact:- cloudflare_reinstall: "{{ (cloudflare_uv_python_path.rc != 0)- or (cloudflare_folder_symlink.stat.lnk_target is defined- and (cloudflare_folder_symlink.stat.lnk_target != cloudflare_uv_python_path.stdout)) }}"
removed
resources/roles/dns/tasks/cloudflare/subtasks/setup.yml
- #########################################################################- # Title: Saltbox: DNS | Cloudflare | Setup #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Cloudflare | Setup | Setup venv block- block:- - name: Cloudflare | Setup | Cleanup folder- ansible.builtin.file:- path: "{{ cloudflare_path }}"- state: absent- - - name: Cloudflare | Setup | Create directory- ansible.builtin.file:- path: "{{ cloudflare_path }}"- state: directory- owner: "{{ user.name }}"- group: "{{ user.name }}"- mode: "0775"- recurse: true- - - name: Cloudflare | Setup | Import Cloudflare files- ansible.builtin.copy:- src: "{{ item }}"- force: true- dest: "{{ cloudflare_path }}/{{ item | basename }}"- owner: "{{ user.name }}"- group: "{{ user.name }}"- mode: "0664"- with_items: "{{ cloudflare_files }}"- - - name: Cloudflare | Setup | Execute Python role- ansible.builtin.include_role:- name: "python"- vars:- python_version: "{{ cloudflare_python_version }}"- - - name: "Cloudflare | Setup | Lookup Python {{ cloudflare_python_version }} installation"- ansible.builtin.command: "{{ python_bin }} python find {{ cloudflare_python_version }} --managed-python"- register: cloudflare_python_install_path_lookup- changed_when: false- environment: "{{ python_environment }}"- become: true- become_user: "{{ user.name }}"- - - name: Cloudflare | Setup | Set Python version- ansible.builtin.set_fact:- cloudflare_python_install_path: "{{ cloudflare_python_install_path_lookup.stdout }}"- - - name: Cloudflare | Setup | Delete venv folder- ansible.builtin.file:- path: "{{ cloudflare_venv_path }}"- state: absent- - - name: Cloudflare | Create venv- ansible.builtin.command:- cmd: "{{ cloudflare_python_install_path }} -m venv {{ cloudflare_venv_path }}"- args:- creates: "{{ 
cloudflare_venv_path }}"- become: true- become_user: "{{ user.name }}"- - - name: Cloudflare | Install pip requirements- ansible.builtin.pip:- requirements: "{{ cloudflare_requirements_path }}"- virtualenv: "{{ cloudflare_venv_path }}"- virtualenv_command: "{{ cloudflare_venv_path }}/bin/python3 -m pip"- become: true- become_user: "{{ user.name }}"- - rescue:- - name: Cloudflare | Setup | Delete venv folder- ansible.builtin.file:- path: "{{ cloudflare_venv_path }}"- state: absent- - - name: Cloudflare | Setup | Print Failure- ansible.builtin.fail:- msg:- - "Setting up the Cloudflare venv failed for some reason."- - "This usually means issues with apt. Make sure 'sudo apt update' and 'sudo apt upgrade' works before retrying."
removed
resources/tasks/instances/readarr.yml
- ############################################################################- # Title: Saltbox: Resources | Tasks | Instances | Get Readarr Info #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ############################################################################- # GNU General Public License v3.0 #- ############################################################################- ---- - name: Resources | Tasks | Instances | Get Info | Check if Readarr exists- ansible.builtin.stat:- path: "{{ readarr_paths_config_location }}"- register: readarr_paths_config_location_stat- - - name: Resources | Tasks | Instances | Get Info | Readarr API Key tasks- when: readarr_paths_config_location_stat.stat.exists- block:- - name: Resources | Tasks | Instances | Get Info | Fetch Readarr API Key- community.general.xml:- path: "{{ readarr_paths_config_location }}"- xpath: /Config/ApiKey- content: text- register: xmlresp- - - name: Resources | Tasks | Instances | Get Info | Set 'readarr_info' variable- ansible.builtin.set_fact:- readarr_info: "{{ readarr_info | default({}) | combine({readarr_name: {'name': readarr_name, 'url': readarr_web_url, 'api_key': xmlresp.matches[0].ApiKey}}) }}"- - - name: Resources | Tasks | Instances | Get Info | Set 'readarr_info' variable- ansible.builtin.set_fact:- readarr_info: "{{ readarr_info | default({}) | combine({readarr_name: {'name': readarr_name, 'url': readarr_web_url, 'api_key': 'not installed'}}) }}"- when: (not readarr_paths_config_location_stat.stat.exists)
removed
roles/autobrr/templates/config.toml.j2
- # config.toml- - # Hostname / IP- #- # Default: "localhost"- #- host = "0.0.0.0"- - # Port- #- # Default: 7474- #- port = 7474- - # Base url- # Set custom baseUrl eg /autobrr/ to serve in subdirectory.- # Not needed for subdomain, or by accessing with the :port directly.- #- # Optional- #- #baseUrl = "/autobrr/"- - # autobrr logs file- # If not defined, logs to stdout- #- # Optional- #- logPath = "log/autobrr.log"- - # Log level- #- # Default: "DEBUG"- #- # Options: "ERROR", "DEBUG", "INFO", "WARN", "TRACE"- #- logLevel = "INFO"- - # Session secret- # Can be generated by running: head /dev/urandom | tr -dc A-Za-z0-9 | head -c16- sessionSecret = "{{ autobrr_secret.stdout }}"
removed
roles/cloudplow/tasks/subtasks/disable.yml
- #########################################################################- # Title: Saltbox: Cloudplow | Disable Task #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Delete Service- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/systemd/delete_service.yml"- vars:- _service_name: "{{ cloudplow_service_name }}"
removed
roles/diag/files/cloudflare_ssl.py
- import argparse- import cloudflare- from cloudflare import Cloudflare- - - def get_zone_id(client: Cloudflare, zone_name: str) -> str:- try:- zone = client.zones.list(name=zone_name)- if len(zone.result) == 0:- raise ValueError(f'Specified zone: {zone_name} was not found')- return zone.result[0].id- except cloudflare.APIConnectionError as e:- exit(f'Error fetching zone ID: {e}')- except Exception as e:- exit(f'Unexpected error: {e}')- - - def get_ssl_tls_mode(auth_email, auth_key, zone_name):- cf = Cloudflare(api_email=auth_email, api_key=auth_key)- - zone_id = get_zone_id(cf, zone_name)- - # Get the SSL/TLS settings for the zone- try:- ssl_settings = cf.zones.settings.get(setting_id='ssl', zone_id=zone_id).to_dict()- ssl_mode = ssl_settings['value']- print(f"{ssl_mode}")- except cloudflare.APIConnectionError as e:- exit(f'/zones/settings/ssl.get - {e} - API call failed')- except Exception as e:- exit(f'/zones/settings/ssl.get - {e} - API call failed')- - - def main():- parser = argparse.ArgumentParser(- prog='SSL/TLS Mode Checker',- description='Retrieves Cloudflare SSL/TLS Encryption Mode for a Zone',- epilog='')- - parser.add_argument('--auth_key', required=True, help='Cloudflare API key')- parser.add_argument('--auth_email', required=True, help='Cloudflare account email')- parser.add_argument('--zone_name', required=True, help='Name of the Cloudflare zone')- - args = parser.parse_args()- - get_ssl_tls_mode(args.auth_email, args.auth_key, args.zone_name)- - - if __name__ == '__main__':- main()
removed
roles/jaeger/defaults/main.yml
- ##########################################################################- # Title: Saltbox: Jaeger | Default Variables #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ##########################################################################- # GNU General Public License v3.0 #- ##########################################################################- ---- ################################- # Basics- ################################- - jaeger_name: jaeger- - ################################- # Paths- ################################- - jaeger_paths_folder: "{{ jaeger_name }}"- jaeger_paths_location: "{{ server_appdata_path }}/{{ jaeger_paths_folder }}"- jaeger_paths_folders_list:- - "{{ jaeger_paths_location }}"- - ################################- # Web- ################################- - jaeger_web_subdomain: "{{ jaeger_name }}"- jaeger_web_domain: "{{ user.domain }}"- jaeger_web_port: "16686"- jaeger_web_url: "{{ 'https://' + (jaeger_web_subdomain + '.' 
+ jaeger_web_domain- if (jaeger_web_subdomain | length > 0)- else jaeger_web_domain) }}"- - ################################- # DNS- ################################- - jaeger_dns_record: "{{ jaeger_web_subdomain }}"- jaeger_dns_zone: "{{ jaeger_web_domain }}"- jaeger_dns_proxy: "{{ dns.proxied }}"- - ################################- # Traefik- ################################- - jaeger_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"- jaeger_traefik_middleware_default: "{{ traefik_default_middleware }}"- jaeger_traefik_middleware_custom: ""- jaeger_traefik_certresolver: "{{ traefik_default_certresolver }}"- jaeger_traefik_enabled: true- jaeger_traefik_api_enabled: false- jaeger_traefik_api_endpoint: ""- - ################################- # Docker- ################################- - # Container- jaeger_docker_container: "{{ jaeger_name }}"- - # Image- jaeger_docker_image_pull: true- jaeger_docker_image_tag: "latest"- jaeger_docker_image: "jaegertracing/all-in-one:{{ jaeger_docker_image_tag }}"- - # Ports- jaeger_docker_ports_defaults: []- jaeger_docker_ports_custom: []- jaeger_docker_ports: "{{ jaeger_docker_ports_defaults- + jaeger_docker_ports_custom }}"- - # Envs- jaeger_docker_envs_default:- TZ: "{{ tz }}"- COLLECTOR_ZIPKIN_HTTP_PORT: "9411"- jaeger_docker_envs_custom: {}- jaeger_docker_envs: "{{ jaeger_docker_envs_default- | combine(jaeger_docker_envs_custom) }}"- - # Commands- jaeger_docker_commands_default: []- jaeger_docker_commands_custom: []- jaeger_docker_commands: "{{ jaeger_docker_commands_default- + jaeger_docker_commands_custom }}"- - # Volumes- jaeger_docker_volumes_default: []- jaeger_docker_volumes_custom: []- jaeger_docker_volumes: "{{ jaeger_docker_volumes_default- + jaeger_docker_volumes_custom }}"- - # Devices- jaeger_docker_devices_default: []- jaeger_docker_devices_custom: []- jaeger_docker_devices: "{{ jaeger_docker_devices_default- + jaeger_docker_devices_custom }}"- - # Hosts- jaeger_docker_hosts_default: {}- 
jaeger_docker_hosts_custom: {}- jaeger_docker_hosts: "{{ docker_hosts_common- | combine(jaeger_docker_hosts_default)- | combine(jaeger_docker_hosts_custom) }}"- - # Labels- jaeger_docker_labels_default: {}- jaeger_docker_labels_custom: {}- jaeger_docker_labels: "{{ docker_labels_common- | combine(jaeger_docker_labels_default)- | combine(jaeger_docker_labels_custom) }}"- - # Hostname- jaeger_docker_hostname: "{{ jaeger_name }}"- - # Networks- jaeger_docker_networks_alias: "{{ jaeger_name }}"- jaeger_docker_networks_default: []- jaeger_docker_networks_custom: []- jaeger_docker_networks: "{{ docker_networks_common- + jaeger_docker_networks_default- + jaeger_docker_networks_custom }}"- - # Capabilities- jaeger_docker_capabilities_default: []- jaeger_docker_capabilities_custom: []- jaeger_docker_capabilities: "{{ jaeger_docker_capabilities_default- + jaeger_docker_capabilities_custom }}"- - # Security Opts- jaeger_docker_security_opts_default: []- jaeger_docker_security_opts_custom: []- jaeger_docker_security_opts: "{{ jaeger_docker_security_opts_default- + jaeger_docker_security_opts_custom }}"- - # Restart Policy- jaeger_docker_restart_policy: unless-stopped- - # State- jaeger_docker_state: started
removed
roles/jaeger/tasks/main.yml
- #########################################################################- # Title: Saltbox: Jaeger Role #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Add DNS record- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml"- vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"- - - name: Remove existing Docker container- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"- - - name: Create directories- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml"- - - name: Create Docker container- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"
removed
roles/readarr/defaults/main.yml
- ##########################################################################- # Title: Saltbox: Readarr | Default Variables #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ##########################################################################- # GNU General Public License v3.0 #- ##########################################################################- ---- ################################- # Basics- ################################- - readarr_instances: ["readarr"]- - ################################- # Settings- ################################- - readarr_external_auth: true- - ################################- # Paths- ################################- - readarr_paths_folder: "{{ readarr_name }}"- readarr_paths_location: "{{ server_appdata_path }}/{{ readarr_paths_folder }}"- readarr_paths_folders_list:- - "{{ readarr_paths_location }}"- - "/mnt/local/Media/Books"- readarr_paths_config_location: "{{ readarr_paths_location }}/config.xml"- - ################################- # Web- ################################- - readarr_web_subdomain: "{{ readarr_name }}"- readarr_web_domain: "{{ user.domain }}"- readarr_web_port: "8787"- readarr_web_url: "{{ 'https://' + (lookup('vars', readarr_name + '_web_subdomain', default=readarr_web_subdomain) + '.' 
+ lookup('vars', readarr_name + '_web_domain', default=readarr_web_domain)- if (lookup('vars', readarr_name + '_web_subdomain', default=readarr_web_subdomain) | length > 0)- else lookup('vars', readarr_name + '_web_domain', default=readarr_web_domain)) }}"- - ################################- # DNS- ################################- - readarr_dns_record: "{{ lookup('vars', readarr_name + '_web_subdomain', default=readarr_web_subdomain) }}"- readarr_dns_zone: "{{ lookup('vars', readarr_name + '_web_domain', default=readarr_web_domain) }}"- readarr_dns_proxy: "{{ dns.proxied }}"- - ################################- # Traefik- ################################- - readarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"- readarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', readarr_name + '_name', default=readarr_name)- if (readarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"- readarr_traefik_middleware_custom: ""- readarr_traefik_certresolver: "{{ traefik_default_certresolver }}"- readarr_traefik_enabled: true- readarr_traefik_api_enabled: true- readarr_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)"- - ################################- # API- ################################- - # default to blank- readarr_api_key:- - ################################- # THEME- ################################- - # Options can be found at https://github.com/themepark-dev/theme.park- readarr_themepark_enabled: false- readarr_themepark_app: "readarr"- readarr_themepark_theme: "{{ global_themepark_theme }}"- readarr_themepark_domain: "{{ global_themepark_domain }}"- readarr_themepark_addons: []- - ################################- # Docker- ################################- - # Container- readarr_docker_container: "{{ readarr_name }}"- - # Image- readarr_docker_image_pull: true- readarr_docker_image_repo: "ghcr.io/hotio/readarr"- readarr_docker_image_tag: "latest"- 
readarr_docker_image: "{{ lookup('vars', readarr_name + '_docker_image_repo', default=readarr_docker_image_repo)- + ':' + lookup('vars', readarr_name + '_docker_image_tag', default=readarr_docker_image_tag) }}"- - # Ports- readarr_docker_ports_defaults: []- readarr_docker_ports_custom: []- readarr_docker_ports: "{{ lookup('vars', readarr_name + '_docker_ports_defaults', default=readarr_docker_ports_defaults)- + lookup('vars', readarr_name + '_docker_ports_custom', default=readarr_docker_ports_custom) }}"- - # Envs- readarr_docker_envs_default:- PUID: "{{ uid }}"- PGID: "{{ gid }}"- UMASK: "002"- TZ: "{{ tz }}"- readarr_docker_envs_custom: {}- readarr_docker_envs: "{{ lookup('vars', readarr_name + '_docker_envs_default', default=readarr_docker_envs_default)- | combine(lookup('vars', readarr_name + '_docker_envs_custom', default=readarr_docker_envs_custom)) }}"- - # Commands- readarr_docker_commands_default: []- readarr_docker_commands_custom: []- readarr_docker_commands: "{{ lookup('vars', readarr_name + '_docker_commands_default', default=readarr_docker_commands_default)- + lookup('vars', readarr_name + '_docker_commands_custom', default=readarr_docker_commands_custom) }}"- - # Volumes- readarr_docker_volumes_default:- - "{{ readarr_paths_location }}:/config"- - "{{ server_appdata_path }}/scripts:/scripts"- readarr_docker_volumes_custom: []- readarr_docker_volumes: "{{ lookup('vars', readarr_name + '_docker_volumes_default', default=readarr_docker_volumes_default)- + lookup('vars', readarr_name + '_docker_volumes_custom', default=readarr_docker_volumes_custom) }}"- - # Devices- readarr_docker_devices_default: []- readarr_docker_devices_custom: []- readarr_docker_devices: "{{ lookup('vars', readarr_name + '_docker_devices_default', default=readarr_docker_devices_default)- + lookup('vars', readarr_name + '_docker_devices_custom', default=readarr_docker_devices_custom) }}"- - # Hosts- readarr_docker_hosts_default: {}- readarr_docker_hosts_custom: {}- 
readarr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', readarr_name + '_docker_hosts_default', default=readarr_docker_hosts_default))- | combine(lookup('vars', readarr_name + '_docker_hosts_custom', default=readarr_docker_hosts_custom)) }}"- - # Labels- readarr_docker_labels_default: {}- readarr_docker_labels_custom: {}- readarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', readarr_name + '_docker_labels_default', default=readarr_docker_labels_default))- | combine((traefik_themepark_labels- if (readarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', readarr_name + '_docker_labels_custom', default=readarr_docker_labels_custom)) }}"- - # Hostname- readarr_docker_hostname: "{{ readarr_name }}"- - # Network Mode- readarr_docker_network_mode_default: "{{ docker_networks_name_common }}"- readarr_docker_network_mode: "{{ lookup('vars', readarr_name + '_docker_network_mode_default', default=readarr_docker_network_mode_default) }}"- - # Networks- readarr_docker_networks_alias: "{{ readarr_name }}"- readarr_docker_networks_default: []- readarr_docker_networks_custom: []- readarr_docker_networks: "{{ docker_networks_common- + lookup('vars', readarr_name + '_docker_networks_default', default=readarr_docker_networks_default)- + lookup('vars', readarr_name + '_docker_networks_custom', default=readarr_docker_networks_custom) }}"- - # Capabilities- readarr_docker_capabilities_default: []- readarr_docker_capabilities_custom: []- readarr_docker_capabilities: "{{ lookup('vars', readarr_name + '_docker_capabilities_default', default=readarr_docker_capabilities_default)- + lookup('vars', readarr_name + '_docker_capabilities_custom', default=readarr_docker_capabilities_custom) }}"- - # Security Opts- readarr_docker_security_opts_default: []- readarr_docker_security_opts_custom: []- readarr_docker_security_opts: "{{ lookup('vars', readarr_name + '_docker_security_opts_default', 
default=readarr_docker_security_opts_default)- + lookup('vars', readarr_name + '_docker_security_opts_custom', default=readarr_docker_security_opts_custom) }}"- - # Restart Policy- readarr_docker_restart_policy: unless-stopped- - # State- readarr_docker_state: started
removed
roles/readarr/tasks/main.yml
- #########################################################################- # Title: Saltbox: Readarr Role #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: "Execute Readarr roles"- ansible.builtin.include_tasks: main2.yml- vars:- readarr_name: "{{ instance }}"- with_items: "{{ readarr_instances }}"- loop_control:- loop_var: instance
removed
roles/readarr/tasks/main2.yml
- #########################################################################- # Title: Saltbox: Readarr #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Add DNS record- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml"- vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"- - - name: Remove existing Docker container- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"- - - name: Create directories- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml"- - - name: Create Docker container- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"- - - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (lookup('vars', readarr_name + '_traefik_sso_middleware', default=readarr_traefik_sso_middleware) | length > 0) and lookup('vars', readarr_name + '_external_auth', default=readarr_external_auth)
removed
roles/readarr/tasks/subtasks/auth.yml
- #########################################################################- # Title: Saltbox: Readarr | Auth Tasks #- # Author(s): salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Auth | Wait for 'config.xml' to be created- ansible.builtin.wait_for:- path: "/opt/{{ readarr_name }}/config.xml"- state: present- - - name: Auth | Wait for 10 seconds- ansible.builtin.wait_for:- timeout: 10- - - name: Auth | Lookup AuthenticationMethod value- community.general.xml:- path: "/opt/{{ readarr_name }}/config.xml"- xpath: "/Config/AuthenticationMethod"- content: "text"- register: xmlresp- - - name: Auth | Toggle AuthenticationMethod to External- when: ((xmlresp.matches[0].AuthenticationMethod is defined) and (xmlresp.matches[0].AuthenticationMethod != 'External'))- block:- - name: Auth | Change the 'AuthenticationMethod' attribute to 'External'- community.general.xml:- path: "/opt/{{ readarr_name }}/config.xml"- xpath: "/Config/AuthenticationMethod"- value: "External"- - - name: Auth | Restart Docker container- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/restart_docker_container.yml"
removed
roles/scripts/files/plex_autoscan_url.sh
- #!/usr/bin/env bash- #########################################################################- # Title: Plex Autoscan URL Script #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # Description: Prints out the Plex Autoscan URL. #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- - ################################- # Constants- ################################- - # Regular colors- readonly NORMAL="\033[0;39m"- readonly GREEN="\033[32m"- - # Bold colors- readonly BRED="\033[1;31m"- readonly BWHITE="\033[1;37m"- readonly BBLUE="\033[1;34m"- - # Config files- readonly SB_ANSIBLE="/srv/git/saltbox/ansible.cfg"- readonly SB_ACCOUNTS="/srv/git/saltbox/accounts.yml"- readonly PAS_CONFIG="/opt/plex_autoscan/config/config.json"- - # Boolean vars- readonly TRUE=1- readonly FALSE=0- - ################################- # Functions- ################################- - function banner1() {- if [ -x "$(command -v toilet)" ]; then- echo ""- toilet 'Plex Autoscan URL' -f standard --filter metal --filter border:metal --width 86- fi- }- - function banner2() {- echo -e "- ${GREEN}┌───────────────────────────────────────────────────────────────────────────────────┐- ${GREEN}│ Title: Plex Autoscan URL Script │- ${GREEN}│ Author(s): desimaniac │- ${GREEN}│ URL: https://github.com/saltyorg/Saltbox │- ${GREEN}│ Description: Prints out the Plex Autoscan URL. │- ${GREEN}├───────────────────────────────────────────────────────────────────────────────────┤- ${GREEN}│ GNU General Public License v3.0 │- ${GREEN}└───────────────────────────────────────────────────────────────────────────────────┘- ${NORMAL}"- }- - function sanity_check() {- # Sanity checks- if ! 
[[ -x "$(command -v jq)" ]]; then- echo -e ${BRED}" Error: "${NORMAL}"'"${BWHITE}"jq"${NORMAL}"' is not installed."\- ${NORMAL}"Run '"${BWHITE}"sudo apt-get install jq"${NORMAL}"' to install." >&2- echo ""- exit 1- elif ! [[ -x "$(command -v yyq)" ]]; then- echo -e ${BRED}" Error: "${NORMAL}"'"${BWHITE}"yyq"${NORMAL}"' is not installed."\- ${NORMAL}"Run '"${BWHITE}"sb install yyq"${NORMAL}"' to install." >&2- echo ""- exit 1- elif [[ ! -f ${PAS_CONFIG} ]]; then- echo -e ${BRED}" Error: "${NORMAL}"File '"${BWHITE}${PAS_CONFIG}${NORMAL}"' is not found." >&2- echo ""- exit 1- elif [[ ! -f ${SB_ACCOUNTS} ]]; then- echo -e ${BRED}" Error: "${NORMAL}"File '"${BWHITE}${SB_ACCOUNTS}${NORMAL}"' is not found." >&2- echo ""- exit 1- fi- - # Validate JSON file- cat ${PAS_CONFIG} | jq -e . >/dev/null 2>&1- rc=$?- if [[ $rc != 0 ]]; then- echo -e ${BRED}" Error: "${NORMAL}"Invalid JSON format in '"${BWHITE}${PAS_CONFIG}${NORMAL}"'." >&2- echo ""- echo -e " See 'JSON Format Errors' on the Wiki FAQ page." 
>&2- echo ""- exit 1- fi- }- - function build_url() {- # Get variables from Plex Autoscan config- SERVER_IP=$(cat ${PAS_CONFIG} | jq -r .SERVER_IP)- SERVER_PORT=$(cat ${PAS_CONFIG} | jq -r .SERVER_PORT)- SERVER_PASS=$(cat ${PAS_CONFIG} | jq -r .SERVER_PASS)- - # Get variables from Saltbox account settings- DOMAIN=$(yyq '.user.domain' ${SB_ACCOUNTS} )- - # If SERVER_IP is 0.0.0.0, assign public IP address to REAL_IP.- if [[ ${SERVER_IP} = 0.0.0.0 ]]; then- REAL_IP="$(dig -4 TXT +short o-o.myaddr.l.google.com @ns1.google.com | awk -F'\"' '{ print $2}')"- else- REAL_IP=${SERVER_IP}- fi- - # Declare Subdomains Array- declare -a SUBDOMAINS=(- "plex.${DOMAIN}"- "mediabox.${DOMAIN}"- "saltbox.${DOMAIN}"- "${REAL_IP}"- )- - # Get length of the subdomains array- SUBDOMAIN_LEN=${#SUBDOMAINS[@]}- - # Declare variables for while loop- declare -i COUNT=0- SUBDOMAIN_IP=""- - # Determine which subdomain points to the actual host IP address (vs a CDN one, for example)- while [[ ((${REAL_IP} != ${SUBDOMAIN_IP}) && (${COUNT} < ${SUBDOMAIN_LEN})) ]]; do- SUBDOMAIN=${SUBDOMAINS[$COUNT]}- SUBDOMAIN_IP=$(dig -4 +short ${SUBDOMAIN} @8.8.8.8)- COUNT+=1- done- }- - # Print Plex Autoscan URL- function print_url() {- - if (( SIMPLE - 1 )); then- echo -e ${BWHITE}"Your Plex Autoscan URL:"- echo -e ${BBLUE}"http://${SUBDOMAIN}:${SERVER_PORT}/${SERVER_PASS}"${NORMAL}- echo ""- else- echo http://${SUBDOMAIN}:${SERVER_PORT}/${SERVER_PASS}- fi- - }- - ################################- # Argument Parser- ################################- - ## https://stackoverflow.com/a/39398359- SIMPLE=${FALSE}- # As long as there is at least one more argument, keep looping- while [[ $# -gt 0 ]]; do- key="$1"- case "$key" in- # This flag type option will catch either -s or --simple- -s|--simple)- SIMPLE=${TRUE}- ;;- *)- # Exit when unknown argument is passed- echo "Unknown option '$key'"- exit 10- ;;- esac- shift- done- - ################################- # Main- ################################- - function main 
()- {- if [[ ${SIMPLE} == ${FALSE} ]]; then- banner1- banner2- fi- sanity_check- build_url- print_url- }- - main "$@"
removed
roles/scripts/files/plexsql.sh
- #!/bin/bash- # if script name is plexsql.sh then- # usage is - ./plexsql.sh "select something from some_table where something = something_else"- # - mkdir -p /opt/plexsql- sqlplex="/opt/plexsql/Plex SQLite"- docker stop plex- docker cp plex:/usr/lib/plexmediaserver/. /opt/plexsql- cd "/opt/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases"- cp com.plexapp.plugins.library.db com.plexapp.plugins.library.db.original- "$sqlplex" com.plexapp.plugins.library.db "$1"- - #and if you want to do something other than select statements use this instead- #cp com.plexapp.plugins.library.db com.plexapp.plugins.library.db.original- #"$sqlplex" com.plexapp.plugins.library.db "DROP index 'index_title_sort_naturalsort'"- #"$sqlplex" com.plexapp.plugins.library.db "DELETE from schema_migrations where version='20180501000000'"- #"$sqlplex" com.plexapp.plugins.library.db "$1"
removed
roles/scripts/files/restart_containers.sh
- #!/bin/bash- #########################################################################- # Title: Restart Running Containers Script #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # Description: Stop running containers and start them back up. #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- - - # Regular color(s)- NORMAL="\033[0;39m"- GREEN="\033[32m"- - echo -e "- $GREEN- ┌───────────────────────────────────────────────────────────────────────────────────┐- │ Title: Restart Running Containers Script │- │ Author(s): desimaniac, salty │- │ URL: https://github.com/saltyorg/Saltbox │- │ Description: Stop running containers and start them back up. │- ├───────────────────────────────────────────────────────────────────────────────────┤- │ GNU General Public License v3.0 │- └───────────────────────────────────────────────────────────────────────────────────┘- $NORMAL- "- - containers=$(docker ps -q)- echo Stopping $containers- docker=$(docker stop $containers)- - sleep 3- - echo Starting $containers- docker=$(docker start $containers)
removed
roles/settings/files/settings-updater.py
- #!/srv/ansible/venv/bin/python3- """- - #########################################################################- # Title: Settings Updater Script #- # Author(s): l3uddz, chazlarson, salty #- # URL: https://github.com/saltyorg/Saltbox #- # Description: Adds variables to settings.yml. #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- - """- import logging- import os- import sys- from logging.handlers import RotatingFileHandler- - from ruamel.yaml import YAML- from ruamel.yaml.comments import CommentedMap- - ############################################################- # INIT- ############################################################- - - log = None- - - def init_logging(playbook_path):- # log settings- log_format = '%(asctime)s - %(levelname)-10s - %(name)-35s - %(funcName)-35s - %(message)s'- log_file_path = os.path.join(playbook_path, "settings-updater.log")- log_level = logging.DEBUG- - # init root_logger- log_formatter = logging.Formatter(log_format)- root_logger = logging.getLogger()- root_logger.setLevel(log_level)- - # init console_logger- console_handler = logging.StreamHandler(sys.stdout)- console_handler.setFormatter(log_formatter)- root_logger.addHandler(console_handler)- - # init file_logger- file_handler = RotatingFileHandler(- log_file_path,- maxBytes=1024 * 1024 * 5,- backupCount=5- )- file_handler.setFormatter(log_formatter)- root_logger.addHandler(file_handler)- - # Set chosen logging level- root_logger.setLevel(log_level)- - # Get logger- return root_logger.getChild("settings-updater")- - - ############################################################- # UPDATER- ############################################################- - def load_settings(file_to_load):- yaml_instance = YAML()- yaml_instance.preserve_quotes = True- - settings = None- try:- with open(file_to_load, "r") as f:- settings = 
yaml_instance.load(f)- except Exception:- log.exception("Exception loading %s: ", file_to_load)- return settings- - def dump_settings(settings, file_to_dump):- dumped = False- try:- yaml_instance = YAML()- yaml_instance.preserve_quotes = True- with open(file_to_dump, 'w') as fp:- yaml_instance.dump(settings, fp)- dumped = True- except Exception:- log.exception("Exception dumping upgraded %s: ", file_to_dump)- return dumped- - def is_remote_entry(thing):- ret_val = False- if type(thing) == CommentedMap:- rem_set = {'port', 'remote', 'template', 'cache'}- for k in thing.keys():- ret_val = ret_val or (k in rem_set)- return ret_val- - def _inner_upgrade(settings1, settings2, key=None, overwrite=False):- sub_upgraded = False- merged = settings2.copy()- - if isinstance(settings1, dict):- for k, v in settings1.items():- # missing k- if k not in settings2:- merged[k] = v- sub_upgraded = True- if not key:- log.info("Added %r setting: %s", str(k), str(v))- else:- log.info("Added %r to setting %r: %s", str(k), str(key), str(v))- continue- - # iterate children- if isinstance(v, dict) or isinstance(v, list):- merged[k], did_upgrade = _inner_upgrade(settings1[k], settings2[k], key=k,- overwrite=overwrite)- sub_upgraded = did_upgrade if did_upgrade else sub_upgraded- elif settings1[k] != settings2[k] and overwrite:- merged = settings1- sub_upgraded = True- elif isinstance(settings1, list) and key:- for v in settings1:- is_remote = is_remote_entry(v)- might_not_want_it = len(settings2) > 0- if v not in settings2 and not is_remote and not might_not_want_it:- merged.append(v)- sub_upgraded = True- log.info("Added to setting %r: %s", str(key), str(v))- continue- - return merged, sub_upgraded- - def upgrade_settings(defaults, currents):- upgraded_settings, upgraded = _inner_upgrade(defaults, currents)- return upgraded, upgraded_settings- - ############################################################- # MAIN- ############################################################- - if __name__ 
== "__main__":- # get playbook dir- if not len(sys.argv) >= 4:- print("3 arguments must be supplied, playbook_dir default_settings current_settings")- sys.exit(1)- playbook_dir = sys.argv[1]- default_file = sys.argv[2]- current_file = sys.argv[3]- - # init logging- log = init_logging(playbook_dir)- - # load settings- default_settings = load_settings(os.path.join(playbook_dir, default_file))- if not default_settings:- log.error("Failed loading \'%s\'. Aborting...", default_file)- sys.exit(1)- - current_settings = load_settings(os.path.join(playbook_dir, current_file))- if not current_settings:- log.error("Failed loading \'%s\'. Aborting...", current_file)- sys.exit(1)- - # compare/upgrade settings- did_upgrade, upgraded_settings = upgrade_settings(default_settings, current_settings)- if not did_upgrade:- log.info("There were no settings changes to apply.")- sys.exit(0)- else:- if not dump_settings(upgraded_settings, os.path.join(playbook_dir, current_file)):- log.error("Failed dumping updated \'%s\'.", current_file)- sys.exit(1)- log.info("Successfully upgraded: \'%s\'.", current_file)- sys.exit(2)
removed
roles/settings/tasks/main.yml
- #########################################################################- # Title: Saltbox: Settings Role #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: "Settings | Start"- ansible.builtin.include_tasks: "subtasks/start.yml"- - - name: "Settings | Main 2"- ansible.builtin.include_tasks: "main2.yml"- loop: "{{ config_files }}"- loop_control:- loop_var: outer_item- - - name: "Settings | Finish"- ansible.builtin.include_tasks: "subtasks/finish.yml"
removed
roles/settings/tasks/main2.yml
- #########################################################################- # Title: Saltbox: Settings | Main 2 #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: "Settings | Copy"- ansible.builtin.include_tasks: "subtasks/copy.yml"- vars:- file: "{{ outer_item }}"- - - name: "Settings | Migrator"- ansible.builtin.include_tasks: "subtasks/migrator.yml"- vars:- file: "{{ outer_item }}"- - - name: "Settings | Updater"- ansible.builtin.include_tasks: "subtasks/updater.yml"- vars:- file: "{{ outer_item }}"
removed
roles/settings/tasks/subtasks/copy.yml
- #########################################################################- # Title: Saltbox: Settings | Copy #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- # Check for config files and import they are missing- - name: "Copy | Check if '{{ file }}' exists"- ansible.builtin.stat:- path: "{{ playbook_dir }}/{{ file }}"- register: file0- - - name: "Copy | Copy '{{ file }}.default' to '{{ file }}'"- ansible.builtin.copy:- src: "{{ playbook_dir }}/defaults/{{ file }}.default"- dest: "{{ playbook_dir }}/{{ file }}"- owner: "{{ saltbox_yml.stat.uid }}"- group: "{{ saltbox_yml.stat.gid }}"- mode: "0664"- when: (not file0.stat.exists)
removed
roles/settings/tasks/subtasks/finish.yml
- #########################################################################- # Title: Saltbox: Settings | Finish #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: "Finish | Check 'settings-updater.py' for new settings"- ansible.builtin.debug: # noqa jinja[invalid]- msg:- - "The 'settings_updater.py' script updated the following- file{{ 's' if (files_updated_successfully | length > 1) else '' }}: '{{ files_updated_successfully | join(', ') | trim }}'"- - "Please check {{ 'these files' if (files_updated_successfully | length > 1) else 'this file' }} for the newly added settings"- - "You can also review the log file: 'settings-updater.log'"- when: (files_updated_successfully | length > 0)- - - name: Traefik 3.0 Migration block- when: traefik3_migration- block:- - name: Provide link to migration docs # Update to main docs page when docs are updated- ansible.builtin.pause:- prompt: "Make sure you read the migration docs (press enter to continue): https://docs.saltbox.dev/saltbox/upgrade/traefik3/"- - # Make sure accounts.yml has been filled in by user- - name: "Finish | Get stats on 'accounts.yml' for hash check"- ansible.builtin.stat:- path: "{{ playbook_dir }}/accounts.yml"- register: accounts_yml- - - name: "Finish | Get stats on 'accounts.yml.default' for hash check"- ansible.builtin.stat:- path: "{{ playbook_dir }}/defaults/accounts.yml.default"- register: accounts_yml_default- - # Sanity Checks- - name: "Finish | Ensure that 'accounts.yml' is configured"- ansible.builtin.assert:- that:- - accounts_yml.stat.exists- - accounts_yml.stat.checksum != accounts_yml_default.stat.checksum- - user.domain != "testsaltbox.ml"- msg: "You must configure 'accounts.yml' before running the Saltbox installer"- - # Exit playbook When necessary- - name: 
Finish | Exit Tasks- when: exit_is_necessary- block:- - name: "Finish | Check 'settings-updater.py' run status for errors"- ansible.builtin.debug:- msg:- - "The 'settings_updater.py' script exited with an error when updating the following- file{{ 's' if (files_updated_unsuccessfully | length > 1) else '' }}: '{{ files_updated_unsuccessfully | join(', ') | trim }}'"- - "Please check 'settings-updater.log' for details"- when: (files_updated_unsuccessfully | length > 0)- - - name: "Finish | Exit so that user can check updated config files"- ansible.builtin.debug:- msg: "Saltbox Installer will now exit."- - - name: "Finish | Exit"- ansible.builtin.meta: end_play
removed
roles/settings/tasks/subtasks/migrator.yml
- #########################################################################- # Title: Saltbox: Settings | Migrator #- # Author(s): desimaniac, salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- #########################################################################- # GNU General Public License v3.0 #- #########################################################################- ---- - name: Migrator | Migrations for all config files- block:- - name: Migrator | All | Migration 01- ansible.builtin.include_tasks: "migrator/all/migration_01.yml"- when: (file != "backup_config.yml")- - - name: Migrator | Migrations for 'accounts.yml'- when: (file == "accounts.yml")- block:- - name: Migrator | 'accounts.yml' | Migration 01- ansible.builtin.include_tasks: "migrator/accounts_yml/migration_01.yml"- - - name: Migrator | Migrations for 'adv_settings.yml'- when: (file == "adv_settings.yml")- block:- - name: Migrator | 'adv_settings.yml' | Migration 01- ansible.builtin.include_tasks: "migrator/adv_settings_yml/migration_01.yml"- - - name: Migrator | Migrations for 'backup_config.yml'- when: (file == "backup_config.yml")- block:- - name: Migrator | 'backup_config.yml' | Migration 01- ansible.builtin.include_tasks: "migrator/backup_config_yml/migration_01.yml"- - - name: Migrator | Migrations for 'providers.yml'- when: (file == "providers.yml")- block:- - name: Migrator | 'providers.yml' | Migration 01- ansible.builtin.include_tasks: "migrator/providers_yml/migration_01.yml"- - - name: Migrator | Migrations for 'settings.yml'- when: (file == "settings.yml")- block:- - name: Migrator | 'settings.yml' | Migration 01- ansible.builtin.include_tasks: "migrator/settings_yml/migration_01.yml"
removed
roles/settings/tasks/subtasks/migrator/accounts_yml/migration_01.yml
- ########################################################################################- # Title: Saltbox: Settings | Migrator | 'accounts.yml' | Migration 01 #- # Author(s): desimaniac #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ########################################################################################- # GNU General Public License v3.0 #- ########################################################################################- ---- - name: "Migrator | 'accounts.yml' | Migration 01 | Set variables"- ansible.builtin.set_fact:- plex_settings: "{{ not ((plex is undefined)- or- (plex is none)- or- (plex | trim | length == 0)) }}"- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'plex' dict- ansible.builtin.shell: |- yyq -i 'del(.plex)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: plex_settings- - - name: Migrator | 'accounts.yml' | Migration 01 | Remove 'null' values- ansible.builtin.replace:- path: "{{ playbook_dir }}/{{ file }}"- regexp: '(?<=: )\bnull\s*$'- replace: ''- owner: "{{ saltbox_yml.stat.uid }}"- group: "{{ saltbox_yml.stat.gid }}"- mode: "0664"- - - name: Migrator | 'accounts.yml' | Migration 01 | Re-import Variables- ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/migrator/adv_settings_yml/migration_01.yml
- ########################################################################################- # Title: Saltbox: Settings | Migrator | 'adv_settings.yml' | Migration 01 #- # Author(s): desimaniac, salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ########################################################################################- # GNU General Public License v3.0 #- ########################################################################################- - ---- - name: "Migrator | 'adv_settings.yml' | Migration 01 | Set variables"- ansible.builtin.set_fact:- old_traefik_tls_settings: "{{ not ((traefik is undefined)- or- (traefik is none)- or- (traefik | trim | length == 0)- or- (traefik.tls is undefined)- or- (traefik.tls is none)- or- (traefik.tls | trim | length == 0)) }}"- old_traefik_http_settings: "{{ not ((traefik is undefined)- or- (traefik is none)- or- (traefik | trim | length == 0)- or- (traefik.http is undefined)- or- (traefik.http is none)- or- (traefik.http | trim | length == 0)) }}"- old_zerossl_settings: "{{ not ((dns is undefined)- or- (dns is none)- or- (dns | trim | length == 0)- or- (dns.zerossl is undefined)- or- (dns.zerossl is none)- or- (dns.zerossl | trim | length == 0)) }}"- traefik_metrics_settings: "{{ not ((traefik is undefined)- or- (traefik is none)- or- (traefik | trim | length == 0)- or- (traefik.metrics is undefined)- or- (traefik.metrics is none)- or- (traefik.metrics | trim | length == 0)) }}"- old_dockerhub: "{{ not ((dockerhub is undefined)- or- (dockerhub is none)- or- (dockerhub | trim | length == 0)) }}"- old_dns_enabled: "{{ not ((dns.enabled is undefined)- or- (dns.enabled is none)- or- (dns.enabled | trim | length == 0)) }}"- old_feeder_mount: "{{ not ((mounts.feeder is undefined)- or- (mounts.feeder is none)- or- (mounts.feeder | trim | length == 0)) }}"- old_remote_mount: "{{ not ((mounts.remote is undefined)- or- (mounts.remote is none)- or- (mounts.remote | trim | length == 0)) }}"- - - name: Migrator | 
'adv_settings.yml' | Migration 01 | Delete 'traefik.tls' dict- ansible.builtin.shell: |- yyq -i 'del(.traefik.tls)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_traefik_tls_settings- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Rebuild 'traefik.cert.http_validation' dict- ansible.builtin.shell: |- yyq -i '.traefik.cert.http_validation = "{{ 'yes' if (traefik.http | default(false)) else 'no' }}"' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_traefik_http_settings- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'traefik.http' dict- ansible.builtin.shell: |- yyq -i 'del(.traefik.http)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_traefik_http_settings- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Rebuild 'traefik.cert.zerossl' dict- ansible.builtin.shell: |- yyq -i '.traefik.cert.zerossl = "{{ 'yes' if (dns.zerossl | default(false)) else 'no' }}"' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_zerossl_settings- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'dns.zerossl' dict- ansible.builtin.shell: |- yyq -i 'del(.dns.zerossl)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_zerossl_settings- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Rebuild 'traefik.metrics' dict- ansible.builtin.shell: |- yyq -i '.traefik.metrics = "no"' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: (not traefik_metrics_settings)- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'dockerhub' dict- ansible.builtin.shell: |- yyq -i 'del(.dockerhub)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_dockerhub- - - name: Migrator | 
'adv_settings.yml' | Migration 01 | Delete 'dns.enabled' key-pair- ansible.builtin.shell: |- yyq -i 'del(.dns.enabled)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_dns_enabled- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'mounts.feeder' dict- ansible.builtin.shell: |- yyq -i 'del(.mounts.feeder)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_feeder_mount- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'mounts.remote' dict- ansible.builtin.shell: |- yyq -i 'del(.mounts.remote)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_remote_mount- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'gpu.nvidia' dict- ansible.builtin.shell: |- yyq -i 'del(.gpu.nvidia)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: gpu is defined and gpu.nvidia is defined- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'traefik.tracing' dict- ansible.builtin.shell: |- yyq -i 'del(.traefik.tracing)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: traefik is defined and traefik.tracing is defined- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Delete 'traefik.subdomains.jaeger' dict- ansible.builtin.shell: |- yyq -i 'del(.traefik.subdomains.jaeger)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: traefik is defined and traefik.subdomains is defined and traefik.subdomains.jaeger is defined- - - name: Migrator | 'adv_settings.yml' | Migration 01 | Remove 'null' values- ansible.builtin.replace:- path: "{{ playbook_dir }}/{{ file }}"- regexp: '(?<=: )\bnull\s*$'- replace: ''- owner: "{{ saltbox_yml.stat.uid }}"- group: "{{ saltbox_yml.stat.gid }}"- mode: "0664"- - - name: Migrator | 'adv_settings.yml' | 
Migration 01 | Re-import Variables- ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/migrator/all/migration_01.yml
- ########################################################################################- # Title: Saltbox: Settings | Migrator | All | Migration 01 #- # Author(s): desimaniac, salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ########################################################################################- # GNU General Public License v3.0 #- ########################################################################################- ---- - name: Migrator | All | '{{ file }}' | Migration 01 | Add single space after colon (if needed)- ansible.builtin.replace:- path: "{{ playbook_dir }}/{{ file }}"- regexp: '(:)[ \t]*(.*)'- replace: '\1 \2'- owner: "{{ saltbox_yml.stat.uid }}"- group: "{{ saltbox_yml.stat.gid }}"- mode: "0664"- - - name: Migrator | All | '{{ file }}' | Migration 01 | Re-import Variables- ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/migrator/backup_config_yml/migration_01.yml
- ########################################################################################- # Title: Saltbox: Settings | Migrator | 'backup_config.yml' | Migration 01 #- # Author(s): desimaniac, salty #- # URL: https://github.com/saltyorg/Saltbox #- # -- #- ########################################################################################- # GNU General Public License v3.0 #- ########################################################################################- ---- - name: "Migrator | 'backup_config.yml' | Migration 01 | Set variables"- ansible.builtin.set_fact:- old_settings: "{{ (backup.cron.enable is defined)- or- (backup.cron.cron_state is defined) }}"- - - name: Migrator | 'backup_config.yml' | Migration 01 | Rebuild 'backup.cron' dict- ansible.builtin.shell: |- yyq -i 'del(.backup.cron.cron_state)' {{ playbook_dir }}/{{ file }}- yyq -i 'del(.backup.cron.enable)' {{ playbook_dir }}/{{ file }}- become: true- become_user: "{{ saltbox_yml.stat.pw_name }}"- when: old_settings- - - name: Migrator | 'backup_config.yml' | Migration 01 | Remove 'null' values- ansible.builtin.replace:- path: "{{ playbook_dir }}/{{ file }}"- regexp: '(?<=: )\bnull\s*$'- replace: ''- owner: "{{ saltbox_yml.stat.uid }}"- group: "{{ saltbox_yml.stat.gid }}"- mode: "0664"- - - name: Migrator | 'backup_config.yml' | Migration 01 | Re-import Variables- ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/migrator/providers_yml/migration_01.yml
########################################################################################
# Title:         Saltbox: Settings | Migrator | 'providers.yml' | Migration 01         #
# Author(s):     salty                                                                 #
# URL:           https://github.com/saltyorg/Saltbox                                   #
# --                                                                                   #
########################################################################################
#                          GNU General Public License v3.0                             #
########################################################################################
---
# FIX: task names previously said 'backup_config.yml' (copy-paste from that migrator);
# this file migrates 'providers.yml', so the names now say so.
# NOTE: 'goddady' (sic) is the legacy misspelled key being migrated to 'godaddy'.
- name: "Migrator | 'providers.yml' | Migration 01 | Set variables"
  ansible.builtin.set_fact:
    old_godaddy: "{{ (goddady is defined) }}"
    old_godaddy_filled: "{{ (goddady is defined) and
                            (goddady is not none) and
                            (goddady | trim | length > 0) and
                            (goddady.api_key is defined) and
                            (goddady.api_key is not none) and
                            (goddady.api_key | length > 0) and
                            (goddady.api_secret is defined) and
                            (goddady.api_secret is not none) and
                            (goddady.api_secret | length > 0) }}"

- name: Migrator | 'providers.yml' | Migration 01 | Delete 'goddady' dict
  ansible.builtin.shell: |
    yyq -i 'del(.goddady)' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: old_godaddy

- name: Migrator | 'providers.yml' | Migration 01 | Rebuild 'godaddy' dict
  ansible.builtin.shell: |
    yyq -i '.godaddy.api_key = "{{ goddady.api_key }}"' {{ playbook_dir }}/{{ file }}
    yyq -i '.godaddy.api_secret = "{{ goddady.api_secret }}"' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: old_godaddy_filled

# yyq can leave literal 'null' values behind; strip them so keys read as empty.
- name: Migrator | 'providers.yml' | Migration 01 | Remove 'null' values
  ansible.builtin.replace:
    path: "{{ playbook_dir }}/{{ file }}"
    regexp: '(?<=: )\bnull\s*$'
    replace: ''
    owner: "{{ saltbox_yml.stat.uid }}"
    group: "{{ saltbox_yml.stat.gid }}"
    mode: "0664"

- name: Migrator | 'providers.yml' | Migration 01 | Re-import Variables
  ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/migrator/settings_yml/migration_01.yml
########################################################################################
# Title:         Saltbox: Settings | Migrator | 'settings.yml' | Migration 01          #
# Author(s):     salty                                                                 #
# URL:           https://github.com/saltyorg/Saltbox                                   #
# --                                                                                   #
########################################################################################
#                          GNU General Public License v3.0                             #
########################################################################################
---
- name: "Migrator | 'settings.yml' | Migration 01 | Set variables"
  ansible.builtin.set_fact:
    old_download: "{{ not
                      ((downloads.nzbs is undefined)
                       or
                       (downloads.torrents is undefined)) }}"
    old_feeder_mount: "{{ (not ((mounts.feeder is undefined)
                                or
                                (mounts.feeder is none)
                                or
                                (mounts.feeder | trim | length == 0)))
                          and mounts.feeder }}"

- name: Migrator | 'settings.yml' | Migration 01 | Rebuild 'downloads' dict
  ansible.builtin.shell: |
    yyq -i 'del(.downloads)' {{ playbook_dir }}/{{ file }}
    yyq -i '.downloads = "/mnt/unionfs/downloads"' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: old_download

- name: Migrator | 'settings.yml' | Migration 01 | Delete 'rclone.remote' dict
  ansible.builtin.shell: |
    yyq -i 'del(.rclone.remote)' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: rclone.remote is defined

# Seed 'rclone.remotes' from the legacy single 'rclone.remote' value.
- name: Migrator | 'settings.yml' | Migration 01 | Create 'rclone.remotes' dict
  ansible.builtin.shell: |
    yyq -i '.rclone.remotes += [{"remote": "{{ rclone.remote }}", "settings": {"mount": true, "template": "google", "union": true, "upload": true, "upload_from": "/mnt/local/Media", "vfs_cache": {"enabled": false, "size": "50G", "max_age": "504h"}}}]' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: (rclone.remotes is undefined) and rclone_old_remote_is_defined

# No legacy remote available: seed with the default "google" remote instead.
- name: Migrator | 'settings.yml' | Migration 01 | Create 'rclone.remotes' dict
  ansible.builtin.shell: |
    yyq -i '.rclone.remotes += [{"remote": "google", "settings": {"mount": true, "template": "google", "union": true, "upload": true, "upload_from": "/mnt/local/Media", "vfs_cache": {"enabled": false, "size": "50G", "max_age": "504h"}}}]' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: (rclone.remotes is undefined) and not rclone_old_remote_is_defined

- name: Migrator | 'settings.yml' | Migration 01 | Add feeder to remotes
  ansible.builtin.shell: |
    yyq -i '.rclone.remotes += [{"remote": "feeder", "settings": {"mount": true, "template": "sftp", "union": true, "upload": false, "upload_from": "/mnt/local/Media", "vfs_cache": {"enabled": false, "size": "50G", "max_age": "504h"}}}]' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: old_feeder_mount

- name: Migrator | 'settings.yml' | Migration 01 | Create 'rclone.enabled' dict
  ansible.builtin.shell: |
    yyq -i '.rclone.enabled = {{ "true" if (rclone_old_remote_is_defined or rclone_remote_is_defined) else "false" }}' {{ playbook_dir }}/{{ file }}
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  when: (rclone.enabled is undefined)

- name: Migrator | 'settings.yml' | Migration 01 | Upgrade to new settings format
  when: (rclone.remotes is defined) and (rclone.remotes is not none) and (rclone.remotes | length > 0)
  block:
    - name: Migrator | 'settings.yml' | Migration 01 | Read current config file
      ansible.builtin.slurp:
        src: /srv/git/saltbox/settings.yml
      register: settings_config_content

    - name: Migrator | 'settings.yml' | Migration 01 | Parse the configuration
      ansible.builtin.set_fact:
        settings_config: "{{ settings_config_content['content'] | b64decode | from_yaml }}"

    # Old-style remotes carried a top-level 'template'; fold every field into the
    # new nested 'settings' layout, defaulting anything missing.
    - name: Migrator | 'settings.yml' | Migration 01 | Transform the data structure
      ansible.builtin.set_fact:
        new_remotes: "{{ new_remotes | default([]) + [{'remote': item.remote, 'settings': {'mount': true, 'template': (item.template | default('google')), 'union': true, 'upload': (item.upload | default(false)), 'upload_from': (item.upload_from | default('/mnt/local/Media')), 'vfs_cache': {'enabled': (item.vfs_cache.enabled | default(false)), 'max_age': (item.vfs_cache.max_age | default('504h')), 'size': (item.vfs_cache.size | default('50G'))}}}] }}"
      loop: "{{ settings_config.rclone.remotes }}"
      when: item.template is defined

    - name: Migrator | 'settings.yml' | Migration 01 | Combine new structure with original config
      ansible.builtin.set_fact:
        new_config: "{{ (settings_config | combine({'rclone': {'remotes': new_remotes}}, recursive=True)) }}"
      when: new_remotes is defined

    - name: Migrator | 'settings.yml' | Migration 01 | Write the new configuration file
      ansible.builtin.copy:
        dest: /srv/git/saltbox/settings.yml
        content: "{{ new_config | to_nice_yaml }}"
        owner: "{{ saltbox_yml.stat.uid }}"
        group: "{{ saltbox_yml.stat.gid }}"
        mode: "0664"
      when: new_remotes is defined

- name: Migrator | 'settings.yml' | Migration 01 | Ensure enable_refresh is set correctly for each remote
  when: (settings_config.rclone.remotes is defined) and (settings_config.rclone.remotes is not none) and (settings_config.rclone.remotes | length > 0)
  block:
    - name: Migrator | 'settings.yml' | Migration 01 | Read current config file
      ansible.builtin.slurp:
        src: /srv/git/saltbox/settings.yml
      register: settings_config_content

    - name: Migrator | 'settings.yml' | Migration 01 | Parse the configuration
      ansible.builtin.set_fact:
        settings_config: "{{ settings_config_content['content'] | b64decode | from_yaml }}"

    # Add 'enable_refresh' (true unless the remote uses the sftp template) only
    # where the key is not already present.
    - name: Migrator | 'settings.yml' | Migration 01 | Process each remote
      ansible.builtin.set_fact:
        updated_remotes: "{{ updated_remotes | default([]) + [item | combine({'settings': item.settings | combine({'enable_refresh': (item.settings.template != 'sftp')}, recursive=True)}, recursive=True)
                             if 'enable_refresh' not in item.settings
                             else item] }}"
      loop: "{{ settings_config.rclone.remotes }}"
      loop_control:
        loop_var: item

    - name: Migrator | 'settings.yml' | Migration 01 | Update configuration with new remotes
      ansible.builtin.set_fact:
        updated_config: "{{ settings_config | combine({'rclone': {'remotes': updated_remotes}}, recursive=True) }}"

    - name: Migrator | 'settings.yml' | Migration 01 | Write updated configuration to file
      ansible.builtin.copy:
        dest: /srv/git/saltbox/settings.yml
        content: "{{ updated_config | to_nice_yaml }}"
        owner: "{{ saltbox_yml.stat.uid }}"
        group: "{{ saltbox_yml.stat.gid }}"
        mode: "0664"

# NOTE(review): list nesting was lost in extraction; the four tasks below are
# rendered at top level like the other trailing tasks — confirm against upstream.
- name: Migrator | 'settings.yml' | Migration 01 | Convert 'true' to 'yes'
  ansible.builtin.command: >
    yyq '
    (.. | select(tag == "!!bool" and . == true)) |= "yes"
    ' /srv/git/saltbox/settings.yml -i

- name: Migrator | 'settings.yml' | Migration 01 | Convert 'false' to 'no'
  ansible.builtin.command: >
    yyq '
    (.. | select(tag == "!!bool" and . == false)) |= "no"
    ' /srv/git/saltbox/settings.yml -i

- name: Migrator | 'settings.yml' | Migration 01 | Remove 'null' values
  ansible.builtin.replace:
    path: "{{ playbook_dir }}/{{ file }}"
    regexp: '(?<=: )\bnull\s*$'
    replace: ''
    owner: "{{ saltbox_yml.stat.uid }}"
    group: "{{ saltbox_yml.stat.gid }}"
    mode: "0664"

- name: Migrator | 'settings.yml' | Migration 01 | Re-import Variables
  ansible.builtin.include_vars: "{{ playbook_dir }}/{{ file }}"
removed
roles/settings/tasks/subtasks/start.yml
#########################################################################
# Title:         Saltbox: Settings | Start                              #
# Author(s):     desimaniac                                             #
# URL:           https://github.com/saltyorg/Saltbox                    #
# --                                                                    #
#########################################################################
#                   GNU General Public License v3.0                     #
#########################################################################
---
- name: "Start | Install yyq"
  ansible.builtin.include_role:
    name: yyq

# Captured stat supplies uid/gid/pw_name for ownership handling downstream.
- name: "Start | Get 'saltbox.yml' info"
  ansible.builtin.stat:
    path: "{{ playbook_dir }}/saltbox.yml"
  register: saltbox_yml

- name: "Start | Create list of config files"
  ansible.builtin.set_fact:
    config_files:
      - "accounts.yml"
      - "settings.yml"
      - "adv_settings.yml"
      - "backup_config.yml"
      - "providers.yml"
      - "hetzner_vlan.yml"
    traefik3_migration: "{{ true if (rclone.remotes is undefined) else false }}"

- name: Start | Initialize vars with empty lists
  ansible.builtin.set_fact:
    files_updated_successfully: []
    files_updated_unsuccessfully: []
    exit_is_necessary: false

- name: "Start | Check if 'settings-updater.log' exists"
  ansible.builtin.stat:
    path: "{{ playbook_dir }}/settings-updater.log"
  register: settings_updater_log

- name: "Start | Reset ownership of 'settings-updater.log'"
  ansible.builtin.file:
    path: "{{ playbook_dir }}/settings-updater.log"
    state: file
    owner: "{{ saltbox_yml.stat.uid }}"
    group: "{{ saltbox_yml.stat.gid }}"
    mode: "0664"
  when: settings_updater_log.stat.exists
removed
roles/settings/tasks/subtasks/updater.yml
#########################################################################
# Title:         Saltbox: Settings | Updater                            #
# Author(s):     desimaniac, l3uddz                                     #
# URL:           https://github.com/saltyorg/Saltbox                    #
# --                                                                    #
#########################################################################
#                   GNU General Public License v3.0                     #
#########################################################################
---
# Exit codes from settings-updater.py: 0 = no change, 1 = failure, 2 = file updated.
- name: "Updater | Run 'settings-updater.py' for '{{ file }}'"
  ansible.builtin.script: "'roles/settings/files/settings-updater.py' '{{ playbook_dir }}' 'defaults/{{ file }}.default' '{{ file }}'"
  become: true
  become_user: "{{ saltbox_yml.stat.pw_name }}"
  register: settings_updater
  ignore_errors: true
  changed_when: false
  failed_when: (settings_updater.rc == 1)

- name: Updater | Build 'files_updated_successfully' list
  ansible.builtin.set_fact:
    files_updated_successfully: "{{ files_updated_successfully + [file] }}"
  when: (settings_updater.rc == 2)

- name: Updater | Build 'files_updated_unsuccessfully' list
  ansible.builtin.set_fact:
    files_updated_unsuccessfully: "{{ files_updated_unsuccessfully + [file] }}"
  when: (settings_updater.rc == 1)

# An updated file means the user must review it before the play can continue.
- name: Updater | Set 'exit_is_necessary' variable
  ansible.builtin.set_fact:
    exit_is_necessary: true
  when: (settings_updater.rc == 2)

- name: Updater | Sort Keys
  ansible.builtin.shell: yyq -i 'sort_keys(..)' {{ playbook_dir }}/{{ file }}
removed
roles/sub_zero/defaults/main.yml
##########################################################################
# Title:         Saltbox: Plex Plugins / Sub-Zero | Default Variables    #
# Author(s):     desimaniac                                              #
# URL:           https://github.com/saltyorg/Saltbox                     #
# --                                                                     #
##########################################################################
#                   GNU General Public License v3.0                      #
##########################################################################
---
################################
# Paths
################################

plex_plugin_subzero_paths_location: "{{ plex_paths_plugins_location }}/Sub-Zero.bundle"

plex_plugin_subzero_paths_info_plist_location: "{{ plex_plugin_subzero_paths_location }}/Contents/Info.plist"

################################
# Repository
################################

plex_plugin_subzero_release_url: "{{ svm }}https://api.github.com/repos/pannal/Sub-Zero.bundle/releases/latest"

# Fallback download used when the release API lookup yields nothing.
plex_plugin_subzero_download_url_backup: https://github.com/pannal/Sub-Zero.bundle/releases/download/2.6.5.3152/Sub-Zero.bundle-2.6.5.3152.zip
removed
roles/sub_zero/tasks/main.yml
#########################################################################
# Title:         Saltbox: Plex Plugins / Sub-Zero                       #
# Author(s):     desimaniac                                             #
# URL:           https://github.com/saltyorg/Saltbox                    #
# --                                                                    #
#########################################################################
#                   GNU General Public License v3.0                     #
#########################################################################
---
- name: Check if Plex instance is defined
  ansible.builtin.set_fact:
    plex_name: "{{ plex_name | default(plex_instances[0]) }}"

- name: Check for previously installed Sub-Zero Plugin
  ansible.builtin.stat:
    path: "{{ plex_plugin_subzero_paths_info_plist_location }}"
  register: plex_plugin_subzero_bundle_status

- name: Tasks for previously installed Sub-Zero Plugin
  when: plex_plugin_subzero_bundle_status.stat.exists
  block:
    - name: Set default value for 'plex_plugin_subzero_is_outdated' variable
      ansible.builtin.set_fact:
        plex_plugin_subzero_is_outdated: false

    # https://stackoverflow.com/a/51109708/10975859
    - name: Check version of previously installed Sub-Zero
      community.general.xml:
        path: "{{ plex_plugin_subzero_paths_info_plist_location }}"
        xpath: /plist/dict/key[.='CFBundleVersion']/following-sibling::*[1]
        content: 'text'
      register: plex_plugin_subzero_info_plist_xmlresp_1

    - name: Set 'plex_plugin_subzero_previously_installed_version' variable
      ansible.builtin.set_fact:
        plex_plugin_subzero_previously_installed_version: "{{ plex_plugin_subzero_info_plist_xmlresp_1.matches[0].string }}"

    - name: Check latest available version for Sub-Zero
      ansible.builtin.shell: curl -s {{ plex_plugin_subzero_release_url }} | jq -r .tag_name
      register: plex_plugin_subzero_latest_version
      ignore_errors: true
      changed_when: false

    # A failed lookup is treated as outdated so the install path re-fetches.
    - name: Compare installed Sub-Zero Plugin version with latest one
      ansible.builtin.set_fact:
        plex_plugin_subzero_is_outdated: "{{ (plex_plugin_subzero_latest_version is failed) or
                                             ((plex_plugin_subzero_latest_version is success) and
                                              (plex_plugin_subzero_previously_installed_version is version(plex_plugin_subzero_latest_version.stdout, '<'))) }}"

- name: Install Sub-Zero Plugin
  when: (not plex_plugin_subzero_bundle_status.stat.exists) or
        (plex_plugin_subzero_bundle_status.stat.exists and plex_plugin_subzero_is_outdated) or
        ('plex-plugin-sub-zero-reinstall' in ansible_run_tags) or ('plex-plugin-sub-zero' in ansible_run_tags)
  block:
    - name: Check to see if {{ plex_name | title }} is running
      when: ('plex-plugin-sub-zero' in ansible_run_tags) or ('plex-plugin-sub-zero-reinstall' in ansible_run_tags)
      block:
        - name: Gather list of running Docker containers
          ansible.builtin.shell: "docker ps --format '{{ '{{' }} .Names{{ '}}' }}' | xargs echo -n"
          register: docker_running_containers_list
          # FIX: read-only query; matches the webtools role and keeps runs idempotent.
          changed_when: false

        - name: Set 'docker_running_containers_list' variable
          ansible.builtin.set_fact:
            docker_running_containers_list: "{{ (docker_running_containers_list.stdout).split() }}"

        - name: "Stop {{ plex_name | title }} container"
          ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml"
          vars:
            var_prefix: "plex"
          when: (plex_docker_container in docker_running_containers_list)

    - name: Remove outdated Sub-Zero Plugin
      ansible.builtin.file:
        path: "{{ plex_plugin_subzero_paths_location }}"
        state: absent

    - name: Get latest Sub-Zero Plugin URL
      ansible.builtin.shell: |
        curl -s {{ plex_plugin_subzero_release_url }} \
          | jq -r ".assets[] | select(.name | test(\"Sub-Zero.bundle\")) | .browser_download_url"
      register: plex_plugin_subzero_download_url
      ignore_errors: true
      changed_when: false

    # Fall back to the pinned backup URL when the API lookup returned nothing.
    - name: Set 'plex_plugin_subzero_download_url' variable
      ansible.builtin.set_fact:
        plex_plugin_subzero_download_url: "{{ plex_plugin_subzero_download_url.stdout | default(plex_plugin_subzero_download_url_backup, true) }}"

    - name: "Create {{ plex_name | title }} 'plug-ins' directory"
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0775"
        recurse: true
      with_items:
        - "{{ plex_paths_plugins_location }}"

    - name: Install Sub-Zero Plugin
      ansible.builtin.unarchive:
        src: "{{ plex_plugin_subzero_download_url }}"
        dest: "{{ plex_paths_plugins_location }}"
        copy: false
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0775"
        validate_certs: false
      register: plex_plugin_subzero_download_status
      ignore_errors: true

    - name: Post-Successfull Sub-Zero Plugin Download
      when: (plex_plugin_subzero_download_status is success)
      block:
        - name: Check for newly installed Sub-Zero Plugin
          ansible.builtin.stat:
            path: "{{ plex_plugin_subzero_paths_info_plist_location }}"
          register: plex_plugin_subzero_info_plist_status

        - name: Post-Successfull Sub-Zero Plugin Install
          when: plex_plugin_subzero_info_plist_status.stat.exists
          block:
            - name: Check version of newly installed Sub-Zero Plugin
              community.general.xml:
                path: "{{ plex_plugin_subzero_paths_info_plist_location }}"
                xpath: /plist/dict/key[.='CFBundleVersion']/following-sibling::*[1]
                content: 'text'
              register: plex_plugin_subzero_info_plist_xmlresp_2

            # FIX: task name now matches the fact being set.
            - name: Set 'plex_plugin_subzero_newly_installed_version' variable
              ansible.builtin.set_fact:
                plex_plugin_subzero_newly_installed_version: "{{ plex_plugin_subzero_info_plist_xmlresp_2.matches[0].string }}"

            - name: Display Sub-Zero Plugin version
              ansible.builtin.debug:
                msg: "Sub-Zero Plugin version {{ plex_plugin_subzero_newly_installed_version }} installed."

    - name: Sub-Zero Plugin install failed
      ansible.builtin.debug:
        msg: "Sub-Zero Plugin install failed."
      when: (plex_plugin_subzero_download_status is failed) or
            ((plex_plugin_subzero_download_status is success) and (not plex_plugin_subzero_info_plist_status.stat.exists))

    - name: "Start {{ plex_name | title }} container"
      ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
      vars:
        var_prefix: "plex"
      when: ('plex-plugin-sub-zero' in ansible_run_tags) or
            ('plex-plugin-sub-zero-reinstall' in ansible_run_tags) or
            (plex_docker_container in docker_running_containers_list)

- name: Sub-Zero Plugin was not updated
  ansible.builtin.debug:
    msg: "Sub-Zero is already the latest version."
  when:
    - plex_plugin_subzero_bundle_status.stat.exists
    - (not plex_plugin_subzero_is_outdated)
    - ('plex-plugin-sub-zero-reinstall' not in ansible_run_tags)
removed
roles/webtools/defaults/main.yml
##########################################################################
# Title:         Saltbox: Plex Plugins / WebTools | Default Variables    #
# Author(s):     desimaniac                                              #
# URL:           https://github.com/saltyorg/Saltbox                     #
# --                                                                     #
##########################################################################
#                   GNU General Public License v3.0                      #
##########################################################################
---
################################
# Paths
################################

plex_plugin_webtools_paths_location: "{{ plex_paths_plugins_location }}/WebTools.bundle"

plex_plugin_webtools_paths_version_location: "{{ plex_plugin_webtools_paths_location }}/VERSION"

plex_plugin_webtools_paths_preferences_location: "{{ plex_paths_plugin_support_location }}/Preferences/com.plexapp.plugins.WebTools.xml"

################################
# Repository
################################

plex_plugin_webtools_release_url: "{{ svm }}https://api.github.com/repos/ukdtom/WebTools.bundle/releases/latest"

# Fallback download used when the release API lookup yields nothing.
plex_plugin_webtools_download_url_backup: https://github.com/ukdtom/WebTools.bundle/releases/download/3.0.0/WebTools.bundle.zip
removed
roles/webtools/tasks/main.yml
#########################################################################
# Title:         Saltbox: Plex Plugins / WebTools                       #
# Author(s):     desimaniac                                             #
# URL:           https://github.com/saltyorg/Saltbox                    #
# --                                                                    #
#########################################################################
#                   GNU General Public License v3.0                     #
#########################################################################
---
- name: Check if Plex instance is defined
  ansible.builtin.set_fact:
    plex_name: "{{ plex_name | default(plex_instances[0]) }}"

- name: Check for previously installed WebTools Plugin
  ansible.builtin.stat:
    path: "{{ plex_plugin_webtools_paths_version_location }}"
  register: plex_plugin_webtools_bundle_status

- name: Tasks for previously installed WebTools Plugin
  when: plex_plugin_webtools_bundle_status.stat.exists
  block:
    - name: Set default value for 'plex_plugin_webtools_is_outdated' variable
      ansible.builtin.set_fact:
        plex_plugin_webtools_is_outdated: false

    - name: Check version of previously installed WebTools Plugin
      ansible.builtin.shell: cat '{{ plex_plugin_webtools_paths_version_location }}' | head -n 1 | awk '{ print }'
      register: plex_plugin_webtools_previously_installed_version
      changed_when: false

    - name: Set 'plex_plugin_webtools_previously_installed_version' variable
      ansible.builtin.set_fact:
        plex_plugin_webtools_previously_installed_version: "{{ plex_plugin_webtools_previously_installed_version.stdout }}"

    - name: Check latest available version for WebTools
      ansible.builtin.shell: curl -s {{ plex_plugin_webtools_release_url }} | jq -r .tag_name
      register: plex_plugin_webtools_latest_version
      ignore_errors: true
      changed_when: false

    # A failed lookup is treated as outdated so the install path re-fetches.
    - name: Compare installed WebTools Plugin version with latest one
      ansible.builtin.set_fact:
        plex_plugin_webtools_is_outdated: "{{ (plex_plugin_webtools_latest_version is failed) or
                                              ((plex_plugin_webtools_latest_version is success) and
                                               (plex_plugin_webtools_previously_installed_version is version(plex_plugin_webtools_latest_version.stdout, '<', strict=True))) }}"

- name: Install WebTools Plugin
  when: (not plex_plugin_webtools_bundle_status.stat.exists) or
        (plex_plugin_webtools_bundle_status.stat.exists and plex_plugin_webtools_is_outdated) or
        ('plex-plugin-webtools-reinstall' in ansible_run_tags)
  block:
    - name: Check to see if {{ plex_name | title }} is running
      when: ('plex-plugin-webtools' in ansible_run_tags) or ('plex-plugin-webtools-reinstall' in ansible_run_tags)
      block:
        - name: Gather list of running Docker containers
          ansible.builtin.shell: "docker ps --format '{{ '{{' }} .Names{{ '}}' }}' | xargs echo -n"
          register: docker_running_containers_list
          changed_when: false

        - name: Set 'docker_running_containers_list' variable
          ansible.builtin.set_fact:
            docker_running_containers_list: "{{ (docker_running_containers_list.stdout).split() }}"

        - name: Stop {{ plex_name | title }} container
          ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml"
          vars:
            var_prefix: "plex"
          when: (plex_docker_container in docker_running_containers_list)

    - name: Remove outdated WebTools Plugin
      ansible.builtin.file:
        path: "{{ plex_plugin_webtools_paths_location }}"
        state: absent

    - name: Get latest WebTools Plugin URL
      ansible.builtin.shell: |
        curl -s {{ plex_plugin_webtools_release_url }} \
          | jq -r ".assets[] | select(.name | test(\"WebTools.bundle.zip\")) | .browser_download_url"
      register: plex_plugin_webtools_download_url
      ignore_errors: true
      changed_when: false

    # Fall back to the pinned backup URL when the API lookup returned nothing.
    - name: Set 'plex_plugin_webtools_download_url' variable
      ansible.builtin.set_fact:
        plex_plugin_webtools_download_url: "{{ plex_plugin_webtools_download_url.stdout | default(plex_plugin_webtools_download_url_backup, true) }}"

    - name: "Create {{ plex_name | title }} 'plug-ins' directory"
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0775"
        recurse: true
      with_items:
        - "{{ plex_paths_plugins_location }}"

    - name: Install WebTools Plugin
      ansible.builtin.unarchive:
        src: "{{ plex_plugin_webtools_download_url }}"
        dest: "{{ plex_paths_plugins_location }}"
        copy: false
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0775"
        validate_certs: false
      register: plex_plugin_webtools_download_status
      ignore_errors: true

    - name: Post-Successfull WebTools Plugin Download
      when: (plex_plugin_webtools_download_status is success)
      block:
        - name: Check for newly installed WebTools Plugin
          ansible.builtin.stat:
            path: "{{ plex_plugin_webtools_paths_version_location }}"
          register: plex_plugin_webtools_version_status

        - name: Post-Successfull WebTools Plugin Install
          when: plex_plugin_webtools_version_status.stat.exists
          block:
            - name: Check version of newly installed WebTools Plugin
              ansible.builtin.shell: cat '{{ plex_plugin_webtools_paths_version_location }}' | head -n 1 | awk '{ print }'
              register: plex_plugin_webtools_newly_installed_version
              changed_when: false

            # FIX: task name previously said 'previously_installed_version' but the
            # fact being set is the newly installed version.
            - name: Set 'plex_plugin_webtools_newly_installed_version' variable
              ansible.builtin.set_fact:
                plex_plugin_webtools_newly_installed_version: "{{ plex_plugin_webtools_newly_installed_version.stdout }}"

            - name: Display WebTools Plugin version
              ansible.builtin.debug:
                msg: "WebTools Plugin version {{ plex_plugin_webtools_newly_installed_version }} installed."

    - name: WebTools Plugin install failed
      ansible.builtin.debug:
        msg: "WebTools Plugin install failed."
      when: (plex_plugin_webtools_download_status is failed) or
            ((plex_plugin_webtools_download_status is success) and (not plex_plugin_webtools_version_status.stat.exists))

    - name: Start {{ plex_name | title }} container
      ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
      vars:
        var_prefix: "plex"
      when:
        - ('plex-plugin-webtools' in ansible_run_tags) or ('plex-plugin-webtools-reinstall' in ansible_run_tags)
        - (plex_docker_container in docker_running_containers_list)

- name: WebTools Plugin was not updated
  ansible.builtin.debug:
    msg: "WebTools is already the latest version."
  when:
    - plex_plugin_webtools_bundle_status.stat.exists
    - (not plex_plugin_webtools_is_outdated)
    - ('plex-plugin-webtools-reinstall' not in ansible_run_tags)
removed
scripts/salty-linter.py
import os
import re
import sys

# Matches stand-alone `if` / `else` keywords only — not substrings of other
# words such as `endif`, `elif`, or `notify`.
_JINJA_KEYWORD_RE = re.compile(r'\b(?:if|else)\b')


def lint_ansible_defaults(content, file_path):
    """Lint multi-line Jinja expressions in one Ansible defaults file.

    Flags `if` / `else` keywords inside a multi-line ``{{ ... }}`` expression
    that start to the left of the column of the opening ``{{``, prints one
    GitHub Actions warning annotation per finding, and returns True when at
    least one problem was found.

    Fixes over the previous version: the old ``line.find('if') < start and
    line.find('else') < start`` check treated an *absent* keyword as position
    -1 (always "misaligned"), and matched substrings such as ``endif``.
    """
    errors = []
    multi_line_jinja_start = None
    within_multi_line_jinja = False

    for line_no, line in enumerate(content.split('\n'), start=1):
        stripped_line = line.strip()

        # Skip the YAML document-start marker.
        if stripped_line == '---':
            continue

        if '{{' in stripped_line and not within_multi_line_jinja:
            # Remember the column of '{{'; the expression is multi-line only
            # when the closing '}}' is not on the same line.
            multi_line_jinja_start = line.find('{{')
            within_multi_line_jinja = '}}' not in stripped_line
        elif within_multi_line_jinja:
            # Flag any stand-alone if/else keyword left of the opening '{{'.
            for match in _JINJA_KEYWORD_RE.finditer(line):
                if match.start() < multi_line_jinja_start:
                    errors.append(
                        (line_no,
                         "'if/else' within Jinja expression should align with the start."))
                    break  # one finding per line is enough

            if '}}' in stripped_line:
                within_multi_line_jinja = False

    for line_no, message in errors:
        # GitHub Actions workflow-command annotation format.
        print(f"::warning file={file_path},line={line_no},endLine={line_no},title=Salty Lint Error::{message}")

    return len(errors) > 0


def crawl_and_lint_ansible_roles(roles_dir):
    """Lint every ``<roles_dir>/<role>/defaults/main.yml`` and exit.

    Exits with status 1 if any file produced findings, 0 otherwise. Returns
    (without exiting) when the roles directory does not exist, matching the
    previous behaviour.
    """
    if not os.path.exists(roles_dir):
        print("Roles directory does not exist.")
        return

    errors_found = False
    # sorted() makes the annotation order deterministic across runs.
    for role_name in sorted(os.listdir(roles_dir)):
        defaults_main_path = os.path.join(roles_dir, role_name, "defaults", "main.yml")
        if os.path.isfile(defaults_main_path):
            with open(defaults_main_path, 'r') as file:
                content = file.read()
            if lint_ansible_defaults(content, defaults_main_path):
                errors_found = True

    sys.exit(1 if errors_found else 0)


# FIX: guard the CLI entry point so importing this module (e.g. from tests)
# no longer triggers the argv check and sys.exit().
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python script.py /path/to/your/ansible/roles")
        sys.exit(1)

    crawl_and_lint_ansible_roles(sys.argv[1])
modified
.ansible-lint
@@ -12,20 +12,17 @@ offline: false skip_list:- - braces- - git-latest- - no-changed-when- - ignore-errors- - risky-shell-pipe- - package-latest- - no-handler - command-instead-of-module - command-instead-of-shell- - deprecated-command-syntax- - yaml[line-length]- - yaml[truthy]- - schema[tasks]+ - ignore-errors+ - latest[git] - name[casing] - name[template]+ - no-changed-when+ - no-handler+ - package-latest+ - risky-shell-pipe+ - schema[playbook]+ - schema[tasks] - var-naming[no-role-prefix]- - schema[playbook]+ - yaml[line-length]
modified
.github/workflows/retry.yml
@@ -30,13 +30,30 @@ echo "attempt_allowed=false" >> $GITHUB_OUTPUT fi + - name: Check for lint failures+ id: check_lint+ if: steps.check_attempts.outputs.attempt_allowed == 'true'+ env:+ GH_TOKEN: ${{ github.token }}+ GH_REPO: ${{ github.repository }}+ run: |+ failed_jobs=$(gh run view ${{ github.event.workflow_run.id }} --json jobs -q '.jobs[] | select(.conclusion == "failure") | .name')++ if echo "$failed_jobs" | grep -qE '^(ansible-lint|defaults-lint)$'; then+ echo "Lint job failed - skipping retry"+ echo "skip_retry=true" >> $GITHUB_OUTPUT+ else+ echo "No lint failures detected"+ echo "skip_retry=false" >> $GITHUB_OUTPUT+ fi+ - name: Sleep for 60 seconds- if: steps.check_attempts.outputs.attempt_allowed == 'true'+ if: steps.check_attempts.outputs.attempt_allowed == 'true' && steps.check_lint.outputs.skip_retry != 'true' run: sleep 60s shell: bash - name: rerun ${{ github.event.workflow_run.id }}- if: steps.check_attempts.outputs.attempt_allowed == 'true'+ if: steps.check_attempts.outputs.attempt_allowed == 'true' && steps.check_lint.outputs.skip_retry != 'true' env: GH_REPO: ${{ github.repository }} GH_TOKEN: ${{ github.token }}
modified
.github/workflows/saltbox-os.yml
@@ -23,6 +23,19 @@ working-directory: ${{ github.workspace }} run: ansible-lint + defaults-lint:+ if: github.event_name == 'pull_request' || !(github.event_name == 'push' && github.actor == 'renovate[bot]')+ runs-on: ubuntu-24.04+ steps:+ - uses: actions/checkout@v5++ - uses: actions/setup-python@v6+ with:+ python-version: '3.13'++ - name: Run Defaults Linter+ run: python3 scripts/saltbox-defaults-linter.py roles/+ find-roles: if: github.event_name == 'pull_request' || !(github.event_name == 'push' && github.actor == 'renovate[bot]') runs-on: ubuntu-24.04@@ -32,51 +45,52 @@ - uses: actions/checkout@v5 - id: set-matrix run: |- ROLES=$(awk '/# Core/{flag=1;next}/# Apps End/{flag=0}flag' saltbox.yml | awk '!/#/' | awk -F'[][]' '{print $2}' | tr '\n' ',' | sed 's/,*$//' | awk -F',' '{ for( i=1; i<=NF; i++ ) print $i }' | awk '{ gsub(/ /,""); print }'| sort -u | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/' | sed "s/.\(roles\|common\|hetzner\|kernel\|motd\|mounts\|nvidia\|nvidia-purge\|preinstall\|rclone\|scripts\|shell\|system\|traefik\|traefik-reset-certs\|user\|cloudflare\|plex-db\|arr-db\|ddns\|cloudplow\|cloudplow-reset\|btrfsmaintenance\|download-clients\|download-indexers\|media-server\|python\|yyq\|crowdsec\).,//g")- echo "matrix={\"roles\":[$ROLES],\"os\":[\"20.04\",\"22.04\",\"24.04\"]}" >> $GITHUB_OUTPUT+ ROLES=$(awk '/# Core/{flag=1;next}/# Apps End/{flag=0}flag' saltbox.yml | awk '!/#/' | awk -F'[][]' '{print $2}' | tr '\n' ',' | sed 's/,*$//' | awk -F',' '{ for( i=1; i<=NF; i++ ) print $i }' | awk '{ gsub(/ /,""); print }'| sort -u | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/' | sed "s/.\(roles\|common\|hetzner\|kernel\|motd\|mounts\|nvidia\|nvidia-purge\|preinstall\|rclone\|scripts\|shell\|system\|traefik\|traefik-reset-certs\|user\|cloudflare\|plex-db\|arr-db\|ddns\|cloudplow\|cloudplow-reset\|btrfsmaintenance\|download-clients\|download-indexers\|media-server\|python\|yyq\|crowdsec\|postgres-host\).,//g")+ echo 
"matrix={\"roles\":[$ROLES],\"os\":[\"22.04\",\"24.04\"]}" >> $GITHUB_OUTPUT install: name: '${{ matrix.roles }}-${{ matrix.os }}' runs-on: ubuntu-${{ matrix.os }}- needs: [ansible-lint, find-roles]+ needs: [ansible-lint, defaults-lint, find-roles] strategy: matrix: ${{ fromJson(needs.find-roles.outputs.matrix) }} fail-fast: false steps: - uses: actions/checkout@v5 + - name: Tune GitHub-hosted runner network+ run: sudo ethtool -K eth0 tx off rx off++ - name: Print pip dependencies+ run: cat ./requirements/requirements-saltbox.txt++ - name: Install sb binary+ uses: jaxxstorm/action-install-gh-release@v2.1.0+ with:+ repo: saltyorg/sb-go+ tag: latest+ extension-matching: disable+ platform: linux+ arch: amd64+ rename-to: sb+ chmod: "0755"++ - name: Check sb version+ run: |+ sudo mv /opt/hostedtoolcache/saltyorg/sb-go/latest/linux-amd64/sb /usr/local/bin/sb+ /usr/local/bin/sb version+ - name: Create Directories- run: sudo mkdir -p /srv/ansible /srv/git/sb-- - name: Copy requirements.txt- run: sudo cp ./requirements/requirements-saltbox.txt /srv/git/sb/requirements-saltbox.txt+ run: sudo mkdir -p /srv/git - name: Chown /srv/git run: sudo chown -R runner:runner /srv/git - - name: Tune GitHub-hosted runner network- run: sudo ethtool -K eth0 tx off rx off-- - name: Print pip dependencies- run: cat /srv/git/sb/requirements-saltbox.txt+ - name: Symlink cloned repository to /srv/git/saltbox+ run: ln -s $GITHUB_WORKSPACE /srv/git/saltbox - name: Install Dependencies- run: curl https://raw.githubusercontent.com/saltyorg/sb/master/sb_dep.sh --output sb_dep.sh && sudo bash sb_dep.sh -v && /srv/ansible/venv/bin/ansible --version-- - name: Symlink cloned repository to /srv/git/saltbox- run: sudo ln -s $GITHUB_WORKSPACE /srv/git/saltbox-- - name: Install saltbox.fact- run: |- mkdir -p $GITHUB_WORKSPACE/ansible_facts.d- curl -fsSL https://github.com/saltyorg/ansible-facts/releases/latest/download/saltbox-facts -o $GITHUB_WORKSPACE/ansible_facts.d/saltbox.fact- chmod +x 
$GITHUB_WORKSPACE/ansible_facts.d/saltbox.fact-- - name: Chown /srv/git- run: sudo chown -R runner:runner /srv/git-- - name: Import default configuration- run: for i in defaults/*; do cp -n $i "$(basename "${i%.*}")"; done+ run: /usr/local/bin/sb gha && /srv/ansible/venv/bin/ansible --version - name: Edit accounts.yml run: sed -i 's/seed/runner/g' accounts.yml@@ -90,6 +104,8 @@ - name: Create basic vars file run: | echo "continuous_integration: true" > vars.yml+ echo "server_appdata_path: /opt2" >> vars.yml+ sudo mkdir -p /opt2 - name: Add Docker Hub info to vars file if: github.repository == 'saltyorg/Saltbox' && github.event.repository.fork == false@@ -98,9 +114,59 @@ echo " token: ${{ secrets.DOCKERHUB_TOKEN }}" >> vars.yml echo " user: ${{ secrets.DOCKERHUB_USERNAME }}" >> vars.yml + - name: Install Saltbox Preinstall+ run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "preinstall" --skip-tags "settings" --extra-vars "@vars.yml"++ - name: Run saltbox.fact+ run: /srv/git/saltbox/ansible_facts.d/saltbox.fact+ - name: Install Saltbox Core run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "core" --skip-tags "settings" --extra-vars "@vars.yml" if: ${{ !(contains(matrix.roles, 'saltbox') || contains(matrix.roles, 'feederbox') || contains(matrix.roles, 'mediabox') || contains(matrix.roles, 'core')) }} - name: Install ${{ matrix.roles }} run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "${{ matrix.roles }}" --skip-tags "settings" --extra-vars "@vars.yml"++ - name: Run Tree on app data folder+ run: sudo tree -aug /opt2++ - name: Inspect all Docker containers+ run: |+ echo "=== Inspecting all Docker containers ==="++ # Get list of all containers (running and stopped)+ ALL_CONTAINERS=$(docker ps -a -q)++ if [ -z "$ALL_CONTAINERS" ]; then+ echo "No containers found on this system"+ exit 0+ fi++ echo "Found containers: $ALL_CONTAINERS"+ echo ""++ # Loop through each container and inspect it+ for container_id in 
$ALL_CONTAINERS; do+ echo "========================================="+ echo "Inspecting container: $container_id"+ echo "========================================="++ # Get container name and status for better readability+ CONTAINER_NAME=$(docker inspect --format='{{.Name}}' $container_id | sed 's/^\///')+ CONTAINER_STATUS=$(docker inspect --format='{{.State.Status}}' $container_id)++ echo "Container Name: $CONTAINER_NAME"+ echo "Container Status: $CONTAINER_STATUS"+ echo ""+ echo "Full inspection details:"+ echo "----------------------------------------"++ # Print full inspection details+ docker inspect $container_id++ echo ""+ echo "========================================="+ echo ""+ done++ echo "=== Container inspection completed ==="
modified
.github/workflows/saltbox.yml
@@ -55,6 +55,19 @@ working-directory: ${{ github.workspace }} run: ansible-lint + defaults-lint:+ if: github.event_name == 'pull_request' || !(github.event_name == 'push' && github.actor == 'renovate[bot]')+ runs-on: ubuntu-24.04+ steps:+ - uses: actions/checkout@v5++ - uses: actions/setup-python@v6+ with:+ python-version: '3.13'++ - name: Run Defaults Linter+ run: python3 scripts/saltbox-defaults-linter.py roles/+ find-roles: if: github.event_name == 'pull_request' || !(github.event_name == 'push' && github.actor == 'renovate[bot]') runs-on: ubuntu-24.04@@ -63,50 +76,51 @@ steps: - uses: actions/checkout@v5 - id: set-matrix- run: echo "matrix={\"roles\":[$(awk '/# Core/{flag=1;next}/# Apps End/{flag=0}flag' saltbox.yml | awk '!/#/' | awk -F'[][]' '{print $2}' | tr '\n' ',' | sed 's/,*$//' | awk -F',' '{ for( i=1; i<=NF; i++ ) print $i }' | awk '{ gsub(/ /,""); print }'| sort -u | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/' | sed "s/.\(roles\|common\|hetzner\|kernel\|motd\|motd-generate-config\|mounts\|nvidia\|nvidia-purge\|preinstall\|rclone\|scripts\|shell\|system\|traefik\|traefik-reset-certs\|user\|cloudflare\|plex-db\|arr-db\|ddns\|cloudplow\|cloudplow-reset\|btrfsmaintenance\|download-clients\|download-indexers\|media-server\|python\|yyq\|crowdsec\).,//g")]}" >> $GITHUB_OUTPUT+ run: echo "matrix={\"roles\":[$(awk '/# Core/{flag=1;next}/# Apps End/{flag=0}flag' saltbox.yml | awk '!/#/' | awk -F'[][]' '{print $2}' | tr '\n' ',' | sed 's/,*$//' | awk -F',' '{ for( i=1; i<=NF; i++ ) print $i }' | awk '{ gsub(/ /,""); print }'| sort -u | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/' | sed "s/.\(roles\|common\|docker\|hetzner\|kernel\|motd\|motd-generate-config\|mounts\|nvidia\|nvidia-purge\|preinstall\|rclone\|scripts\|shell\|system\|traefik\|traefik-reset-certs\|user\|cloudflare\|plex-db\|arr-db\|ddns\|cloudplow\|cloudplow-reset\|btrfsmaintenance\|download-clients\|download-indexers\|media-server\|python\|yyq\|crowdsec\|postgres-host\).,//g")]}" >> $GITHUB_OUTPUT 
install: name: '${{ matrix.roles }}' runs-on: ubuntu-24.04- needs: [ansible-lint, find-roles]+ needs: [ansible-lint, defaults-lint, find-roles] strategy: matrix: ${{ fromJson(needs.find-roles.outputs.matrix) }} fail-fast: false steps: - uses: actions/checkout@v5 + - name: Tune GitHub-hosted runner network+ run: sudo ethtool -K eth0 tx off rx off++ - name: Print pip dependencies+ run: cat ./requirements/requirements-saltbox.txt++ - name: Install sb binary+ uses: jaxxstorm/action-install-gh-release@v2.1.0+ with:+ repo: saltyorg/sb-go+ tag: latest+ extension-matching: disable+ platform: linux+ arch: amd64+ rename-to: sb+ chmod: "0755"++ - name: Check sb version+ run: |+ sudo mv /opt/hostedtoolcache/saltyorg/sb-go/latest/linux-amd64/sb /usr/local/bin/sb+ /usr/local/bin/sb version+ - name: Create Directories- run: sudo mkdir -p /srv/ansible /srv/git/sb-- - name: Copy requirements.txt- run: sudo cp ./requirements/requirements-saltbox.txt /srv/git/sb/requirements-saltbox.txt+ run: sudo mkdir -p /srv/git - name: Chown /srv/git run: sudo chown -R runner:runner /srv/git - - name: Tune GitHub-hosted runner network- run: sudo ethtool -K eth0 tx off rx off-- - name: Print pip dependencies- run: cat /srv/git/sb/requirements-saltbox.txt+ - name: Symlink cloned repository to /srv/git/saltbox+ run: ln -s $GITHUB_WORKSPACE /srv/git/saltbox - name: Install Dependencies- run: curl https://raw.githubusercontent.com/saltyorg/sb/master/sb_dep.sh --output sb_dep.sh && sudo bash sb_dep.sh -v && /srv/ansible/venv/bin/ansible --version-- - name: Symlink cloned repository to /srv/git/saltbox- run: sudo ln -s $GITHUB_WORKSPACE /srv/git/saltbox-- - name: Install saltbox.fact- run: |- mkdir -p $GITHUB_WORKSPACE/ansible_facts.d- curl -fsSL https://github.com/saltyorg/ansible-facts/releases/latest/download/saltbox-facts -o $GITHUB_WORKSPACE/ansible_facts.d/saltbox.fact- chmod +x $GITHUB_WORKSPACE/ansible_facts.d/saltbox.fact-- - name: Chown /srv/git- run: sudo chown -R runner:runner /srv/git-- - 
name: Import default configuration- run: for i in defaults/*; do cp -n $i "$(basename "${i%.*}")"; done+ run: /usr/local/bin/sb gha && /srv/ansible/venv/bin/ansible --version - name: Edit accounts.yml run: sed -i 's/seed/runner/g' accounts.yml@@ -120,6 +134,8 @@ - name: Create basic vars file run: | echo "continuous_integration: true" > vars.yml+ echo "server_appdata_path: /opt2" >> vars.yml+ sudo mkdir -p /opt2 - name: Add Docker Hub info to vars file if: github.repository == 'saltyorg/Saltbox' && github.event.repository.fork == false@@ -128,9 +144,59 @@ echo " token: ${{ secrets.DOCKERHUB_TOKEN }}" >> vars.yml echo " user: ${{ secrets.DOCKERHUB_USERNAME }}" >> vars.yml + - name: Install Saltbox Preinstall+ run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "preinstall" --skip-tags "settings" --extra-vars "@vars.yml"++ - name: Run saltbox.fact+ run: /srv/git/saltbox/ansible_facts.d/saltbox.fact+ - name: Install Saltbox Core run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "core" --skip-tags "settings" --extra-vars "@vars.yml" if: ${{ !(contains(matrix.roles, 'saltbox') || contains(matrix.roles, 'feederbox') || contains(matrix.roles, 'mediabox') || contains(matrix.roles, 'core')) }} - name: Install ${{ matrix.roles }} run: sudo /srv/ansible/venv/bin/ansible-playbook saltbox.yml --tags "${{ matrix.roles }}" --skip-tags "settings" --extra-vars "@vars.yml"++ - name: Run Tree on app data folder+ run: sudo tree -aug /opt2++ - name: Inspect all Docker containers+ run: |+ echo "=== Inspecting all Docker containers ==="++ # Get list of all containers (running and stopped)+ ALL_CONTAINERS=$(docker ps -a -q)++ if [ -z "$ALL_CONTAINERS" ]; then+ echo "No containers found on this system"+ exit 0+ fi++ echo "Found containers: $ALL_CONTAINERS"+ echo ""++ # Loop through each container and inspect it+ for container_id in $ALL_CONTAINERS; do+ echo "========================================="+ echo "Inspecting container: $container_id"+ echo 
"========================================="++ # Get container name and status for better readability+ CONTAINER_NAME=$(docker inspect --format='{{.Name}}' $container_id | sed 's/^\///')+ CONTAINER_STATUS=$(docker inspect --format='{{.State.Status}}' $container_id)++ echo "Container Name: $CONTAINER_NAME"+ echo "Container Status: $CONTAINER_STATUS"+ echo ""+ echo "Full inspection details:"+ echo "----------------------------------------"++ # Print full inspection details+ docker inspect $container_id++ echo ""+ echo "========================================="+ echo ""+ done++ echo "=== Container inspection completed ==="
modified
.gitignore
@@ -3,6 +3,13 @@ ### Python ### *.pyc+*.py[cod]+*$py.class+/__pycache__/+/library/__pycache__/+/scripts/__pycache__/+/lookup_plugins/__pycache__/+/filter_plugins/__pycache__/ ### Ansible ### /backup.lock@@ -34,4 +41,5 @@ /.idea /*.log /*.log.*-.vscode/settings.json+.vscode/+.claude/
modified
README.md
@@ -101,19 +101,63 @@ </a> </td> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/jonathanfinley>+ <img src=https://avatars.githubusercontent.com/u/23283167?v=4 width="100;" alt=jonathanfinley/>+ <br />+ <sub style="font-size:14px"><b>jonathanfinley</b></sub>+ </a>+ </td>+</tr>+<tr>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <a href=https://github.com/chazlarson> <img src=https://avatars.githubusercontent.com/u/3865541?v=4 width="100;" alt=Chaz Larson/> <br /> <sub style="font-size:14px"><b>Chaz Larson</b></sub> </a> </td>-</tr>-<tr>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/jonathanfinley>- <img src=https://avatars.githubusercontent.com/u/23283167?v=4 width="100;" alt=jonathanfinley/>- <br />- <sub style="font-size:14px"><b>jonathanfinley</b></sub>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/RXWatcher>+ <img src=https://avatars.githubusercontent.com/u/14085001?v=4 width="100;" alt=RXWatcher/>+ <br />+ <sub style="font-size:14px"><b>RXWatcher</b></sub>+ </a>+ </td>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/JigSawFr>+ <img src=https://avatars.githubusercontent.com/u/5781907?v=4 width="100;" alt=JigSaw/>+ <br />+ <sub style="font-size:14px"><b>JigSaw</b></sub>+ </a>+ </td>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/andrewkhunn>+ <img src=https://avatars.githubusercontent.com/u/116436?v=4 width="100;" alt=Andrew Hunn/>+ <br />+ <sub style="font-size:14px"><b>Andrew Hunn</b></sub>+ </a>+ </td>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/BeansIsFat>+ <img src=https://avatars.githubusercontent.com/u/24848012?v=4 width="100;" alt=Beans Baxter/>+ <br />+ <sub 
style="font-size:14px"><b>Beans Baxter</b></sub>+ </a>+ </td>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/maximuskowalski>+ <img src=https://avatars.githubusercontent.com/u/13492750?v=4 width="100;" alt=Max Kowalski/>+ <br />+ <sub style="font-size:14px"><b>Max Kowalski</b></sub>+ </a>+ </td>+</tr>+<tr>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/TABLE272>+ <img src=https://avatars.githubusercontent.com/u/11992630?v=4 width="100;" alt=TABLE272/>+ <br />+ <sub style="font-size:14px"><b>TABLE272</b></sub> </a> </td> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">@@ -124,54 +168,17 @@ </a> </td> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/kbgvirus>- <img src=https://avatars.githubusercontent.com/u/20810766?v=4 width="100;" alt=Alon Nitzan/>- <br />- <sub style="font-size:14px"><b>Alon Nitzan</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/RXWatcher>- <img src=https://avatars.githubusercontent.com/u/14085001?v=4 width="100;" alt=RXWatcher/>- <br />- <sub style="font-size:14px"><b>RXWatcher</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/JigSawFr>- <img src=https://avatars.githubusercontent.com/u/5781907?v=4 width="100;" alt=JigSaw/>- <br />- <sub style="font-size:14px"><b>JigSaw</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/TABLE272>- <img src=https://avatars.githubusercontent.com/u/11992630?v=4 width="100;" alt=TABLE272/>- <br />- <sub style="font-size:14px"><b>TABLE272</b></sub>- </a>- </td>-</tr>-<tr>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a 
href=https://github.com/maximuskowalski>- <img src=https://avatars.githubusercontent.com/u/13492750?v=4 width="100;" alt=Max Kowalski/>- <br />- <sub style="font-size:14px"><b>Max Kowalski</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/BeansIsFat>- <img src=https://avatars.githubusercontent.com/u/24848012?v=4 width="100;" alt=Beans Baxter/>- <br />- <sub style="font-size:14px"><b>Beans Baxter</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/andrewkhunn>- <img src=https://avatars.githubusercontent.com/u/116436?v=4 width="100;" alt=Andrew Hunn/>- <br />- <sub style="font-size:14px"><b>Andrew Hunn</b></sub>+ <a href=https://github.com/lonix>+ <img src=https://avatars.githubusercontent.com/u/2330355?v=4 width="100;" alt=Stian Buch Larsen/>+ <br />+ <sub style="font-size:14px"><b>Stian Buch Larsen</b></sub>+ </a>+ </td>+ <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">+ <a href=https://github.com/Migz93>+ <img src=https://avatars.githubusercontent.com/u/33037112?v=4 width="100;" alt=Migz93/>+ <br />+ <sub style="font-size:14px"><b>Migz93</b></sub> </a> </td> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">@@ -182,28 +189,14 @@ </a> </td> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/Migz93>- <img src=https://avatars.githubusercontent.com/u/33037112?v=4 width="100;" alt=Migz93/>- <br />- <sub style="font-size:14px"><b>Migz93</b></sub>- </a>- </td>- <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">- <a href=https://github.com/lonix>- <img src=https://avatars.githubusercontent.com/u/2330355?v=4 width="100;" alt=Stian Buch Larsen/>- <br />- <sub style="font-size:14px"><b>Stian Buch Larsen</b></sub>- </a>- </td>-</tr>-<tr>- <td align="center" style="word-wrap: 
break-word; width: 150.0; height: 150.0"> <a href=https://github.com/Aethenn> <img src=https://avatars.githubusercontent.com/u/58144688?v=4 width="100;" alt=Aethenn/> <br /> <sub style="font-size:14px"><b>Aethenn</b></sub> </a> </td>+</tr>+<tr> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <a href=https://github.com/horjulf> <img src=https://avatars.githubusercontent.com/u/6215635?v=4 width="100;" alt=Filipe/>@@ -239,8 +232,6 @@ <sub style="font-size:14px"><b>Patrick Sindelka</b></sub> </a> </td>-</tr>-<tr> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <a href=https://github.com/bandwith> <img src=https://avatars.githubusercontent.com/u/62576?v=4 width="100;" alt=bandwith/>@@ -248,6 +239,8 @@ <sub style="font-size:14px"><b>bandwith</b></sub> </a> </td>+</tr>+<tr> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <a href=https://github.com/powerdude> <img src=https://avatars.githubusercontent.com/u/780882?v=4 width="100;" alt=powerdude/>
modified
ansible.cfg
@@ -2,6 +2,7 @@ inventory = ./inventories/local roles_path = roles:resources/roles filter_plugins = ./filter_plugins+lookup_plugins = ./lookup_plugins library = ./library log_path = ./saltbox.log callbacks_enabled = profile_tasks
modified
filter_plugins/rclone_filters.py
@@ -1,4 +1,6 @@ def filter_rclone_remote_name(item):+ # 'name' is an optional field in settings.yml, only used when template is 'nfs'.+ # If not provided, falls back to extracting the name from the remote string. if 'settings' in item and 'name' in item['settings'] and item['settings']['template'] == 'nfs': return item['settings']['name'] else:
modified
inventories/group_vars/all.yml
@@ -43,7 +43,7 @@ cloudflare_records_enabled: "{{ cloudflare_is_enabled }}" -plex_account_lookup: "{{ '/opt/saltbox/plex.ini' | check_plex_ini(plex_instances[0]) }}"+plex_account_lookup: "{{ (server_appdata_path + '/saltbox/plex.ini') | check_plex_ini(plex_instances[0]) }}" plex_account_is_enabled: "{{ plex_account_lookup.exists and (plex_account_lookup.identifier | length > 0) and@@ -83,12 +83,12 @@ (rclone.remote is not none) and (rclone.remote | trim | length > 0) }}" +rclone_mounts_ipv4_only: "{{ mounts.ipv4_only | bool }}"+ use_cloudplow: "{{ rclone_remote_is_defined and use_remote }}"- use_remote: "{{ (rclone.enabled | bool) and not continuous_integration }}"- use_motd: "{{ motd_install | bool }}"-+use_intel: "{{ gpu.intel | bool }}" use_nvidia: "{{ nvidia_enabled | bool }}" ################################@@ -96,8 +96,8 @@ ################################ server_root_path: "/"- server_appdata_path: "{{ server_root_path | regex_replace('\\/$', '') + '/opt' }}"+server_local_folder_path: "/mnt/local" ################################ # Paths@@ -119,20 +119,24 @@ # User ################################ -uid: "{{ ansible_local.saltbox.users[user.name].uid }}"-gid: "{{ ansible_local.saltbox.users[user.name].gid }}"-vgid: "{{ ansible_local.saltbox.groups.video.gid }}"-rgid: "{{ ansible_local.saltbox.groups.render.gid }}"-dockergid: "{{ ansible_local.saltbox.groups.docker.gid }}"+uid: "{{ ansible_facts['ansible_local']['saltbox']['users'][user.name]['uid'] }}"+gid: "{{ ansible_facts['ansible_local']['saltbox']['users'][user.name]['gid'] }}"+vgid: "{{ ansible_facts['ansible_local']['saltbox']['groups']['video']['gid'] }}"+rgid: "{{ ansible_facts['ansible_local']['saltbox']['groups']['render']['gid'] }}"+dockergid: "{{ ansible_facts['ansible_local']['saltbox']['groups']['docker']['gid'] }}" ################################ # Timezone ################################ tz: "{{ system.timezone- if system is defined and system.timezone is defined and 'auto' not in 
system.timezone | lower- else ansible_local.saltbox.timezone.timezone- if ansible_local is defined and ansible_local.saltbox.timezone is defined and ansible_local.saltbox.timezone.timezone is defined and ansible_local.saltbox.timezone.timezone | trim | length > 0+ if (system is defined) and (system.timezone is defined) and ('auto' not in system.timezone | lower)+ else ansible_facts['ansible_local']['saltbox']['timezone']['timezone']+ if (ansible_facts['ansible_local'])+ and (ansible_facts['ansible_local']['saltbox'] is defined)+ and (ansible_facts['ansible_local']['saltbox']['timezone'] is defined)+ and (ansible_facts['ansible_local']['saltbox']['timezone']['timezone'] is defined)+ and (ansible_facts['ansible_local']['saltbox']['timezone']['timezone'] | trim | length > 0) else 'Etc/UTC' }}" ################################@@ -141,12 +145,16 @@ ip_address_host: "0.0.0.0" ip_address_localhost: "127.0.0.1"-ip_address_public: "{{ ansible_local.saltbox.ip.public_ip }}"-ip_address_public_is_valid: "{{ false if ansible_local.saltbox.ip.failed_ipv4 else true }}"-ip_address_public_error: "{{ ansible_local.saltbox.ip.error_ipv4 }}"-ipv6_address_public: "{{ ansible_local.saltbox.ip.public_ipv6 }}"-ipv6_address_public_is_valid: "{{ false if ansible_local.saltbox.ip.failed_ipv6 else true }}"-ipv6_address_public_error: "{{ ansible_local.saltbox.ip.error_ipv6 }}"+ip_address_public: "{{ ansible_facts['ansible_local']['saltbox']['ip']['public_ip'] }}"+ip_address_public_is_valid: "{{ false+ if ansible_facts['ansible_local']['saltbox']['ip']['failed_ipv4']+ else true }}"+ip_address_public_error: "{{ ansible_facts['ansible_local']['saltbox']['ip']['error_ipv4'] }}"+ipv6_address_public: "{{ ansible_facts['ansible_local']['saltbox']['ip']['public_ipv6'] }}"+ipv6_address_public_is_valid: "{{ false+ if ansible_facts['ansible_local']['saltbox']['ip']['failed_ipv6']+ else true }}"+ipv6_address_public_error: "{{ ansible_facts['ansible_local']['saltbox']['ip']['error_ipv6'] }}" 
################################ # Theme@@ -191,37 +199,39 @@ traefik_default_certresolver: "{{ traefik_default_certprovider if (not (traefik_http | bool)) else (traefik_default_resolver if not zerossl_is_enabled else traefik_default_zerossl_resolver) }}"+traefik_role_wildcard_enabled: "{{ lookup('role_var', '_traefik_wildcard_enabled', default=(not traefik_http)) | bool }}"+ traefik_error_pages_enabled: "{{ traefik.error_pages | bool }}"-traefik_error_pages_role_enabled: "{{ lookup('vars', traefik_role_var + '_traefik_error_pages_enabled', default=lookup('vars', role_name + '_traefik_error_pages_enabled', default=false)) | bool }}"-traefik_error_pages_middleware: "{{ 'error-pages-middleware@docker,'- if traefik_error_pages_enabled and traefik_error_pages_role_enabled+traefik_error_pages_role_enabled: "{{ lookup('role_var', '_traefik_error_pages_enabled', default=false) | bool }}"+traefik_error_pages_middleware: "{{ 'error-pages-middleware@docker'+ if (traefik_error_pages_enabled and traefik_error_pages_role_enabled) else '' }}" traefik_default_robot: true traefik_default_gzip: false traefik_default_autodetect: false-traefik_default_cloudflarewarp: "{{ lookup('vars', traefik_role_var + '_dns_proxy', default=lookup('vars', role_name + '_dns_proxy', default=dns.proxied)) and cloudflare_is_enabled and traefik_plugin_cloudflarewarp_enabled }}"+traefik_default_cloudflarewarp: "{{ lookup('role_var', '_dns_proxy', default=dns_proxied) and cloudflare_is_enabled and traefik_plugin_cloudflarewarp_enabled }}" traefik_default_middleware_default: "{{ traefik_error_pages_middleware + 'globalHeaders@file,secureHeaders@file' + (',autodetect@docker'- if (lookup('vars', traefik_role_var + '_traefik_autodetect_enabled', default=lookup('vars', role_name + '_traefik_autodetect_enabled', default=traefik_default_autodetect)) | bool)+ if (lookup('role_var', '_traefik_autodetect_enabled', default=traefik_default_autodetect) | bool) else '') + (',gzip@docker'- if (lookup('vars', 
traefik_role_var + '_traefik_gzip_enabled', default=lookup('vars', role_name + '_traefik_gzip_enabled', default=traefik_default_gzip)) | bool)+ if (lookup('role_var', '_traefik_gzip_enabled', default=traefik_default_gzip) | bool) else '') + (',robotHeaders@file'- if (lookup('vars', traefik_role_var + '_traefik_robot_enabled', default=lookup('vars', role_name + '_traefik_robot_enabled', default=traefik_default_robot)) | bool)+ if (lookup('role_var', '_traefik_robot_enabled', default=traefik_default_robot) | bool) else '') + (',hsts@file' if (traefik.hsts | bool) else '') + (',cloudflarewarp@docker'- if (traefik_default_cloudflarewarp | bool)+ if traefik_default_cloudflarewarp else '') + (',crowdsec@docker'- if (lookup('vars', traefik_role_var + '_traefik_crowdsec_enabled', default=lookup('vars', role_name + '_traefik_crowdsec_enabled', default=crowdsec_is_enabled)))+ if (lookup('role_var', '_traefik_crowdsec_enabled', default=crowdsec_is_enabled)) else '') + (',' if (traefik_role_middleware_sso | length > 0) else '')@@ -232,71 +242,80 @@ if (not traefik_default_middleware_custom.startswith(',') and traefik_default_middleware_custom | length > 0) else traefik_default_middleware_custom) }}" -traefik_role_middleware_sso: "{{ lookup('vars', traefik_role_var + '_traefik_sso_middleware', default=lookup('vars', role_name + '_traefik_sso_middleware', default='')) }}"-traefik_role_middleware_default: "{{ lookup('vars', traefik_role_var + '_traefik_middleware_default', default=lookup('vars', role_name + '_traefik_middleware_default', default='')) }}"-traefik_role_middleware_custom: "{{ lookup('vars', traefik_role_var + '_traefik_middleware_custom', default=lookup('vars', role_name + '_traefik_middleware_custom', default='')) }}"+traefik_role_middleware_sso: "{{ lookup('role_var', '_traefik_sso_middleware', default='') }}"+traefik_role_middleware_default: "{{ lookup('role_var', '_traefik_middleware_default', default='') }}"+traefik_role_middleware_custom: "{{ 
lookup('role_var', '_traefik_middleware_custom', default='') }}" traefik_middleware: "{{ traefik_role_middleware_default + (',' + traefik_role_middleware_custom if (not traefik_role_middleware_custom.startswith(',') and traefik_role_middleware_custom | length > 0) else traefik_role_middleware_custom) }}" -traefik_default_middleware_default_http: "{{ traefik_error_pages_middleware }}globalHeaders@file,redirect-to-https@docker"+traefik_default_middleware_default_http:+ - "{{ traefik_error_pages_middleware }}"+ - "globalHeaders@file"+ - "{{ (''+ if (lookup('role_var', '_traefik_middleware_http_insecure', default=false) | bool)+ else 'redirect-to-https@docker') }}" traefik_default_middleware_custom_http: ""-traefik_default_middleware_http: "{{ traefik_default_middleware_default_http+traefik_default_middleware_http: "{{ (traefik_default_middleware_default_http | select() | join(',')) + (',' + traefik_default_middleware_custom_http if (not traefik_default_middleware_custom_http.startswith(',') and traefik_default_middleware_custom_http | length > 0) else traefik_default_middleware_custom_http) + (',autodetect@docker'- if (lookup('vars', traefik_role_var + '_traefik_autodetect_enabled', default=lookup('vars', role_name + '_traefik_autodetect_enabled', default=traefik_default_autodetect)) | bool)+ if (lookup('role_var', '_traefik_autodetect_enabled', default=traefik_default_autodetect) | bool) else '') + (',gzip@docker'- if (lookup('vars', traefik_role_var + '_traefik_gzip_enabled', default=lookup('vars', role_name + '_traefik_gzip_enabled', default=traefik_default_gzip)) | bool)+ if (lookup('role_var', '_traefik_gzip_enabled', default=traefik_default_gzip) | bool) else '') + (',robotHeaders@file'- if (lookup('vars', traefik_role_var + '_traefik_robot_enabled', default=lookup('vars', role_name + '_traefik_robot_enabled', default=traefik_default_robot)) | bool)+ if (lookup('role_var', '_traefik_robot_enabled', default=traefik_default_robot) | bool) else '') + 
(',cloudflarewarp@docker' if (traefik_default_cloudflarewarp | bool) else '') + (',crowdsec@docker'- if (lookup('vars', traefik_role_var + '_traefik_crowdsec_enabled', default=lookup('vars', role_name + '_traefik_crowdsec_enabled', default=crowdsec_is_enabled)) | bool)+ if (lookup('role_var', '_traefik_crowdsec_enabled', default=crowdsec_is_enabled) | bool) else '') + (',' if (traefik_role_middleware_sso | length > 0) else '') + traefik_role_middleware_sso }}" -traefik_default_middleware_default_http_api: "globalHeaders@file,redirect-to-https@docker"+traefik_default_middleware_default_http_api:+ - "globalHeaders@file"+ - "{{ (''+ if (lookup('role_var', '_traefik_middleware_http_api_insecure', default=false) | bool)+ else 'redirect-to-https@docker') }}" traefik_default_middleware_custom_http_api: ""-traefik_default_middleware_http_api: "{{ traefik_default_middleware_default_http_api+traefik_default_middleware_http_api: "{{ (traefik_default_middleware_default_http_api | select() | join(',')) + (',' + traefik_default_middleware_custom_http_api if (not traefik_default_middleware_custom_http_api.startswith(',') and traefik_default_middleware_custom_http_api | length > 0) else traefik_default_middleware_custom_http_api) + (',autodetect@docker'- if (lookup('vars', traefik_role_var + '_traefik_autodetect_enabled', default=lookup('vars', role_name + '_traefik_autodetect_enabled', default=traefik_default_autodetect)) | bool)+ if (lookup('role_var', '_traefik_autodetect_enabled', default=traefik_default_autodetect) | bool) else '') + (',gzip@docker'- if (lookup('vars', traefik_role_var + '_traefik_gzip_enabled', default=lookup('vars', role_name + '_traefik_gzip_enabled', default=traefik_default_gzip)) | bool)+ if (lookup('role_var', '_traefik_gzip_enabled', default=traefik_default_gzip) | bool) else '') + (',robotHeaders@file'- if (lookup('vars', traefik_role_var + '_traefik_robot_enabled', default=lookup('vars', role_name + '_traefik_robot_enabled', 
default=traefik_default_robot)) | bool)+ if (lookup('role_var', '_traefik_robot_enabled', default=traefik_default_robot) | bool) else '') + (',cloudflarewarp@docker' if (traefik_default_cloudflarewarp | bool) else '') + (',crowdsec@docker'- if (lookup('vars', traefik_role_var + '_traefik_crowdsec_enabled', default=lookup('vars', role_name + '_traefik_crowdsec_enabled', default=crowdsec_is_enabled)) | bool)+ if (lookup('role_var', '_traefik_crowdsec_enabled', default=crowdsec_is_enabled) | bool) else '') }}" traefik_default_middleware_default_api: "{{ 'globalHeaders@file,secureHeaders@file' + (',autodetect@docker'- if (lookup('vars', traefik_role_var + '_traefik_autodetect_enabled', default=lookup('vars', role_name + '_traefik_autodetect_enabled', default=traefik_default_autodetect)) | bool)+ if (lookup('role_var', '_traefik_autodetect_enabled', default=traefik_default_autodetect) | bool) else '') + (',gzip@docker'- if (lookup('vars', traefik_role_var + '_traefik_gzip_enabled', default=lookup('vars', role_name + '_traefik_gzip_enabled', default=traefik_default_gzip)) | bool)+ if (lookup('role_var', '_traefik_gzip_enabled', default=traefik_default_gzip) | bool) else '') + (',robotHeaders@file'- if (lookup('vars', traefik_role_var + '_traefik_robot_enabled', default=lookup('vars', role_name + '_traefik_robot_enabled', default=traefik_default_robot)) | bool)+ if (lookup('role_var', '_traefik_robot_enabled', default=traefik_default_robot) | bool) else '') + (',hsts@file' if (traefik.hsts | bool)@@ -305,7 +324,7 @@ if (traefik_default_cloudflarewarp | bool) else '') + (',crowdsec@docker'- if (lookup('vars', traefik_role_var + '_traefik_crowdsec_enabled', default=lookup('vars', role_name + '_traefik_crowdsec_enabled', default=crowdsec_is_enabled)) | bool)+ if (lookup('role_var', '_traefik_crowdsec_enabled', default=crowdsec_is_enabled) | bool) else '') }}" traefik_default_middleware_custom_api: "" traefik_default_middleware_api: "{{ 
traefik_default_middleware_default_api@@ -315,52 +334,52 @@ traefik_default_sso_middleware: "authelia@docker" -traefik_role_enabled: "{{ lookup('vars', traefik_role_var + '_traefik_enabled', default=lookup('vars', role_name + '_traefik_enabled', default=false)) | bool }}"-traefik_role_api_enabled: "{{ lookup('vars', traefik_role_var + '_traefik_api_enabled', default=lookup('vars', role_name + '_traefik_api_enabled', default=false)) | bool }}"+traefik_role_enabled: "{{ lookup('role_var', '_traefik_enabled', default=false) | bool }}"+traefik_role_api_enabled: "{{ lookup('role_var', '_traefik_api_enabled', default=false) | bool }}" traefik_http: "{{ false if ((traefik_challenge_provider != 'cloudflare') or cloudflare_is_enabled) else true }}" -traefik_subdomain: "{{ lookup('vars', traefik_role_var + '_web_subdomain', default=lookup('vars', role_name + '_web_subdomain', default=omit)) }}"-traefik_domain: "{{ lookup('vars', traefik_role_var + '_web_domain', default=lookup('vars', role_name + '_web_domain', default=omit)) }}"+traefik_subdomain: "{{ lookup('role_var', '_web_subdomain', default=omit) }}"+traefik_domain: "{{ lookup('role_var', '_web_domain', default=omit) }}" traefik_router: "{{ lookup('vars', traefik_role_var + '_name', default=lookup('vars', role_name + '_name', default=omit)) }}" -traefik_loadbalancer_port: "{{ lookup('vars', traefik_role_var + '_web_port', default=lookup('vars', role_name + '_web_port', default=omit)) }}"-traefik_loadbalancer_scheme: "{{ lookup('vars', traefik_role_var + '_web_scheme', default=lookup('vars', role_name + '_web_scheme', default=omit)) }}"+traefik_loadbalancer_port: "{{ lookup('role_var', '_web_port', default=omit) }}"+traefik_loadbalancer_scheme: "{{ lookup('role_var', '_web_scheme', default=omit) }}" traefik_host: "{{ traefik_subdomain + '.' 
+ traefik_domain if (traefik_subdomain | length > 0) else traefik_domain }}" -traefik_host_override_lookup: "{{ lookup('vars', traefik_role_var + '_web_host_override', default=lookup('vars', role_name + '_web_host_override', default='')) }}"-traefik_fqdn_override_lookup: "{{ lookup('vars', traefik_role_var + '_web_fqdn_override', default=lookup('vars', role_name + '_web_fqdn_override', default='')) }}"+traefik_host_override_lookup: "{{ lookup('role_var', '_web_host_override', default='') }}"+traefik_fqdn_override_lookup: "{{ lookup('role_var', '_web_fqdn_override', default=[]) }}" traefik_host_template: "{{ traefik_host | traefik_host_rule(traefik_host_override_lookup, traefik_fqdn_override_lookup) }}" -traefik_entrypoint_web_default: "{{ 'web' if not (lookup('vars', traefik_role_var + '_traefik_tailscale_enabled', default=false) | bool) else 'tailscale-web' }}"-traefik_entrypoint_web: "{{ lookup('vars', traefik_role_var + '_traefik_entrypoint_web', default=traefik_entrypoint_web_default) }}"-traefik_entrypoint_websecure_default: "{{ 'websecure' if not (lookup('vars', traefik_role_var + '_traefik_tailscale_enabled', default=false) | bool) else 'tailscale-websecure' }}"-traefik_entrypoint_websecure: "{{ lookup('vars', traefik_role_var + '_traefik_entrypoint_websecure', default=traefik_entrypoint_websecure_default) }}"+traefik_entrypoint_tailscale_default: false+traefik_entrypoint_web_default: "{{ 'web' if not (lookup('role_var', '_traefik_tailscale_enabled', default=traefik_entrypoint_tailscale_default) | bool) else 'tailscale-web' }}"+traefik_entrypoint_web: "{{ lookup('role_var', '_traefik_entrypoint_web', default=traefik_entrypoint_web_default) }}"+traefik_entrypoint_websecure_default: "{{ 'websecure' if not (lookup('role_var', '_traefik_tailscale_enabled', default=traefik_entrypoint_tailscale_default) | bool) else 'tailscale-websecure' }}"+traefik_entrypoint_websecure: "{{ lookup('role_var', '_traefik_entrypoint_websecure', 
default=traefik_entrypoint_websecure_default) }}" traefik_url: "{{ 'https://'- + (lookup('vars', traefik_role_var + '_web_subdomain', default=lookup('vars', role_name + '_web_subdomain', default='')) + '.'- + lookup('vars', traefik_role_var + '_web_domain', default=lookup('vars', role_name + '_web_domain', default=''))- if (lookup('vars', traefik_role_var + '_web_subdomain', default=lookup('vars', role_name + '_web_subdomain', default='')) | length > 0)- else lookup('vars', traefik_role_var + '_web_domain', default=lookup('vars', role_name + '_web_domain', default=''))) }}"--traefik_themepark_labels:- - '{ "traefik.http.middlewares.themepark-{{ traefik_router }}.plugin.themepark.app": "{{ lookup("vars", traefik_role_var + "_themepark_app", default=role_name) }}" }'- - '{ "traefik.http.middlewares.themepark-{{ traefik_router }}.plugin.themepark.theme": "{{ lookup("vars", traefik_role_var + "_themepark_theme", default=lookup("vars", role_name + "_themepark_theme", default="")) }}" }'- - '{ "traefik.http.middlewares.themepark-{{ traefik_router }}.plugin.themepark.addons": "{{ (lookup("vars", traefik_role_var + "_themepark_addons", default=lookup("vars", role_name + "_themepark_addons", default="")) | join(","))- if lookup("vars", traefik_role_var + "_themepark_addons", default=lookup("vars", role_name + "_themepark_addons", default="")) | length > 0+ + ((lookup('role_var', '_web_subdomain', default='') + '.'+ + lookup('role_var', '_web_domain', default=''))+ if (lookup('role_var', '_web_subdomain', default='') | length > 0)+ else lookup('role_var', '_web_domain', default='')) }}"++traefik_themepark_labels_tmp:+ - '{ "traefik.http.middlewares.themepark-{{ traefik_router }}.plugin.themepark.app": "{{ lookup("role_var", "_themepark_app", default=role_name) }}" }'+ - '{ "traefik.http.middlewares.themepark-{{ traefik_router }}.plugin.themepark.theme": "{{ lookup("role_var", "_themepark_theme", default="") }}" }'+ - '{ "traefik.http.middlewares.themepark-{{ traefik_router 
}}.plugin.themepark.addons": "{{ (lookup("role_var", "_themepark_addons", default="") | join(","))+ if lookup("role_var", "_themepark_addons", default="") | length > 0 else omit }}" }' +traefik_themepark_labels: "{{ traefik_themepark_labels_tmp | map('from_json') | combine | dict2items | rejectattr('value', 'equalto', '') | items2dict }}"+ ################################ # Docker ################################--# Toggles pruning of dangling images after container creation.-docker_create_image_prune: true docker_volumes_downloads_nzbs: "{{ [downloads_usenet_path + ':/downloads/nzbs'] if nzbs_downloads_path_is_defined@@ -378,87 +397,105 @@ - "/mnt:/mnt:rslave" docker_volumes_common: "{{ docker_volumes_common_folders + docker_volumes_downloads_common- if (lookup('vars', traefik_role_var + '_docker_volumes_download', default=true) | bool)+ if (lookup('role_var', '_docker_volumes_download', default=true) | bool) else docker_volumes_common_folders }}" docker_hosts_common: {} docker_labels_docker_depends_on_template:- com.github.saltbox.depends_on: "{{ lookup('vars', traefik_role_var + '_depends_on', default=lookup('vars', role_name + '_depends_on', default=docker_container_network_mode_lookup))+ com.github.saltbox.depends_on: "{{ lookup('role_var', '_depends_on', default=docker_container_network_mode_lookup) if docker_container_network_mode- else lookup('vars', traefik_role_var + '_depends_on', default=lookup('vars', role_name + '_depends_on')) }}"+ else lookup('role_var', '_depends_on') }}" docker_labels_docker_depends_on_delay_template:- com.github.saltbox.depends_on.delay: "{{ lookup('vars', traefik_role_var + '_depends_on_delay', default=lookup('vars', role_name + '_depends_on_delay', default='0'))+ com.github.saltbox.depends_on.delay: "{{ lookup('role_var', '_depends_on_delay', default='0') if docker_container_network_mode- else lookup('vars', traefik_role_var + '_depends_on_delay', default=lookup('vars', role_name + '_depends_on_delay')) }}"+ else 
lookup('role_var', '_depends_on_delay') }}" docker_labels_docker_depends_on_healthchecks_template:- com.github.saltbox.depends_on.healthchecks: "{{ lookup('vars', traefik_role_var + '_depends_on_healthchecks', default=lookup('vars', role_name + '_depends_on_healthchecks', default='true'))+ com.github.saltbox.depends_on.healthchecks: "{{ lookup('role_var', '_depends_on_healthchecks', default='true') if docker_container_network_mode- else lookup('vars', traefik_role_var + '_depends_on_healthchecks', default=lookup('vars', role_name + '_depends_on_healthchecks')) }}"+ else lookup('role_var', '_depends_on_healthchecks') }}"++docker_labels_diun_enabled: true+docker_labels_autoheal_enabled: true+docker_labels_autoheal_timeout: "10" docker_labels_diun_template: diun.enable: "true" docker_labels_autoheal_template: autoheal: "true"- autoheal.stop.timeout: "{{ lookup('vars', role_name + '_docker_stop_timeout', default='10') }}"+ autoheal.stop.timeout: "{{ lookup('role_var', '_docker_stop_timeout', default=docker_labels_autoheal_timeout) }}" docker_labels_custom_common: {} docker_labels_saltbox_tmp: - com.github.saltbox.saltbox_managed: "true"- - "{{ docker_labels_diun_template if (lookup('vars', role_name + '_diun_enabled', default=true) | bool) else omit }}"- - "{{ docker_labels_autoheal_template if (lookup('vars', role_name + '_autoheal_enabled', default=true) | bool) else omit }}"- - "{{ docker_labels_docker_depends_on_template if ((lookup('vars', role_name + '_depends_on', default='') | length > 0) or docker_container_network_mode) else omit }}"- - "{{ docker_labels_docker_depends_on_delay_template if ((lookup('vars', role_name + '_depends_on_delay', default='') | length > 0) or docker_container_network_mode) else omit }}"- - "{{ docker_labels_docker_depends_on_healthchecks_template if ((lookup('vars', role_name + '_depends_on_healthchecks', default='') | length > 0) or docker_container_network_mode) else omit }}"+ - com.github.saltbox.saltbox_controller: "{{ 'true' if 
(lookup('role_var', '_docker_controller', default=true) | bool) else 'false' }}"+ - "{{ docker_labels_diun_template if (lookup('role_var', '_diun_enabled', default=docker_labels_diun_enabled) | bool) else omit }}"+ - "{{ docker_labels_autoheal_template if (lookup('role_var', '_autoheal_enabled', default=docker_labels_autoheal_enabled) | bool) else omit }}"+ - "{{ docker_labels_docker_depends_on_template if ((lookup('role_var', '_depends_on', default='') | length > 0) or docker_container_network_mode) else omit }}"+ - "{{ docker_labels_docker_depends_on_delay_template if ((lookup('role_var', '_depends_on_delay', default='') | length > 0) or docker_container_network_mode) else omit }}"+ - "{{ docker_labels_docker_depends_on_healthchecks_template if ((lookup('role_var', '_depends_on_healthchecks', default='') | length > 0) or docker_container_network_mode) else omit }}" - "{{ docker_labels_custom_common }}" -docker_container_network_mode: "{{ 'container:' in lookup('vars', traefik_role_var + '_docker_network_mode', default=lookup('vars', role_name + '_docker_network_mode', default=docker_networks_name_common)) }}"-docker_container_network_mode_lookup: "{{ lookup('vars', traefik_role_var + '_docker_network_mode', default=lookup('vars', role_name + '_docker_network_mode', default=docker_networks_name_common)) | split(':') | last }}"--docker_labels_saltbox: "{{ docker_labels_saltbox_tmp | reject('equalto', omit) | list }}"+docker_labels_wildcard_template:+ - '{ "traefik.http.routers.{{ traefik_router }}.tls.domains[0].main": "{{ traefik_domain }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}.tls.domains[0].sans": "{{ "*." 
+ traefik_domain }}" }'+docker_labels_wildcard_dict: "{{ (docker_labels_wildcard_template | map('from_json') | combine) if traefik_role_wildcard_enabled else {} }}"++docker_labels_wildcard_api_template:+ - '{ "traefik.http.routers.{{ traefik_router }}-api.tls.domains[0].main": "{{ traefik_domain }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-api.tls.domains[0].sans": "{{ "*." + traefik_domain }}" }'+docker_labels_wildcard_api_dict: "{{ (docker_labels_wildcard_api_template | map('from_json') | combine) if traefik_role_wildcard_enabled else {} }}"++docker_container_network_mode: "{{ 'container:' in lookup('role_var', '_docker_network_mode', default=docker_networks_name_common) }}"+docker_container_network_mode_lookup: "{{ lookup('role_var', '_docker_network_mode', default=docker_networks_name_common) | split(':') | last }}"++docker_labels_saltbox: "{{ docker_labels_saltbox_tmp | flatten | select('mapping') | combine }}" docker_labels_traefik_main:- - traefik.enable: "true"+ - '{ "traefik.enable": "true" }' - '{ "traefik.http.routers.{{ traefik_router }}-http.entrypoints": "{{ traefik_entrypoint_web }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-http.service": "{{ traefik_router }}-http" }' - '{ "traefik.http.routers.{{ traefik_router }}-http.rule": "{{ traefik_host_template }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-http.priority": "20" }'- - '{ "traefik.http.routers.{{ traefik_router }}-http.middlewares": "{{ lookup("vars", traefik_role_var + "_traefik_middleware_http", default=lookup("vars", role_name + "_traefik_middleware_http", default=traefik_default_middleware_http)) }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-http.middlewares": "{{ lookup("role_var", "_traefik_middleware_http", default=traefik_default_middleware_http) }}" }' - '{ "traefik.http.routers.{{ traefik_router }}.entrypoints": "{{ traefik_entrypoint_websecure }}" }' - '{ "traefik.http.routers.{{ traefik_router }}.service": "{{ traefik_router }}" }' - '{ 
"traefik.http.routers.{{ traefik_router }}.rule": "{{ traefik_host_template }}" }'- - '{ "traefik.http.routers.{{ traefik_router }}.priority": "{{ lookup("vars", traefik_role_var + "_traefik_priority", default=lookup("vars", role_name + "_traefik_priority", default="20")) }}" }'- - '{ "traefik.http.routers.{{ traefik_router }}.tls.certresolver": "{{ lookup("vars", traefik_role_var + "_traefik_certresolver", default=lookup("vars", role_name + "_traefik_certresolver", default=traefik_default_certresolver)) }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}.priority": "{{ lookup("role_var", "_traefik_priority", default="20") }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}.tls.certresolver": "{{ lookup("role_var", "_traefik_certresolver", default=traefik_default_certresolver) }}" }' - '{ "traefik.http.routers.{{ traefik_router }}.tls.options": "securetls@file" }' - '{ "traefik.http.routers.{{ traefik_router }}.middlewares": "{{ traefik_middleware }}" }'- - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.server.port": "{{ lookup("vars", traefik_role_var + "_web_http_port", default=lookup("vars", role_name + "_web_http_port", default=traefik_loadbalancer_port)) }}" }'- - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.server.scheme": "{{ lookup("vars", traefik_role_var + "_web_http_scheme", default=lookup("vars", role_name + "_web_http_scheme", default=traefik_loadbalancer_scheme)) }}" }'- - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.serverstransport": "{{ lookup("vars", traefik_role_var + "_web_http_serverstransport", default=lookup("vars", role_name + "_web_http_serverstransport", default=omit)) }}" }'+ - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.server.port": "{{ lookup("role_var", "_web_http_port", default=traefik_loadbalancer_port) }}" }'+ - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.server.scheme": "{{ lookup("role_var", "_web_http_scheme", 
default=traefik_loadbalancer_scheme) }}" }'+ - '{ "traefik.http.services.{{ traefik_router }}-http.loadbalancer.serverstransport": "{{ lookup("role_var", "_web_http_serverstransport", default=omit) }}" }' - '{ "traefik.http.services.{{ traefik_router }}.loadbalancer.server.port": "{{ traefik_loadbalancer_port }}" }' - '{ "traefik.http.services.{{ traefik_router }}.loadbalancer.server.scheme": "{{ traefik_loadbalancer_scheme }}" }'- - '{ "traefik.http.services.{{ traefik_router }}.loadbalancer.serverstransport": "{{ lookup("vars", traefik_role_var + "_web_serverstransport", default=lookup("vars", role_name + "_web_serverstransport", default=omit)) }}" }'+ - '{ "traefik.http.services.{{ traefik_router }}.loadbalancer.serverstransport": "{{ lookup("role_var", "_web_serverstransport", default=omit) }}" }' docker_labels_traefik_api: - '{ "traefik.http.routers.{{ traefik_router }}-api-http.entrypoints": "{{ traefik_entrypoint_web }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-api-http.service": "{{ traefik_router }}" }'- - '{ "traefik.http.routers.{{ traefik_router }}-api-http.rule": "{{ traefik_host_template }} && ({{ lookup("vars", traefik_role_var + "_traefik_api_endpoint", default=lookup("vars", role_name + "_traefik_api_endpoint", default=omit)) }})" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-api-http.rule": "{{ traefik_host_template }} && ({{ lookup("role_var", "_traefik_api_endpoint", default=omit) }})" }' - '{ "traefik.http.routers.{{ traefik_router }}-api-http.priority": "30" }'- - '{ "traefik.http.routers.{{ traefik_router }}-api-http.middlewares": "{{ lookup("vars", traefik_role_var + "_traefik_api_middleware_http", default=lookup("vars", role_name + "_traefik_api_middleware_http", default=traefik_default_middleware_http_api)) }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-api-http.middlewares": "{{ lookup("role_var", "_traefik_api_middleware_http", default=traefik_default_middleware_http_api) }}" }' - '{ "traefik.http.routers.{{ 
traefik_router }}-api.entrypoints": "{{ traefik_entrypoint_websecure }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-api.service": "{{ traefik_router }}" }'- - '{ "traefik.http.routers.{{ traefik_router }}-api.rule": "{{ traefik_host_template }} && ({{ lookup("vars", traefik_role_var + "_traefik_api_endpoint", default=lookup("vars", role_name + "_traefik_api_endpoint", default=omit)) }})" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-api.rule": "{{ traefik_host_template }} && ({{ lookup("role_var", "_traefik_api_endpoint", default=omit) }})" }' - '{ "traefik.http.routers.{{ traefik_router }}-api.priority": "30" }'- - '{ "traefik.http.routers.{{ traefik_router }}-api.tls.certresolver": "{{ lookup("vars", traefik_role_var + "_traefik_certresolver", default=lookup("vars", role_name + "_traefik_certresolver", default=traefik_default_certresolver)) }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-api.tls.certresolver": "{{ lookup("role_var", "_traefik_certresolver", default=traefik_default_certresolver) }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-api.tls.options": "securetls@file" }'- - '{ "traefik.http.routers.{{ traefik_router }}-api.middlewares": "{{ lookup("vars", traefik_role_var + "_traefik_api_middleware", default=lookup("vars", role_name + "_traefik_api_middleware", default=traefik_default_middleware_api)) }}" }'--docker_labels_traefik: "{{ docker_labels_traefik_main | combine(docker_labels_traefik_api)+ - '{ "traefik.http.routers.{{ traefik_router }}-api.middlewares": "{{ lookup("role_var", "_traefik_api_middleware", default=traefik_default_middleware_api) }}" }'++docker_labels_traefik_main_dict: "{{ docker_labels_traefik_main | map('from_json') | combine | dict2items | rejectattr('value', 'equalto', '') | items2dict }}"+docker_labels_traefik_api_dict: "{{ docker_labels_traefik_api | map('from_json') | combine | dict2items | rejectattr('value', 'equalto', '') | items2dict }}"++docker_labels_traefik: "{{ 
(docker_labels_traefik_main_dict | combine(docker_labels_traefik_api_dict, docker_labels_wildcard_dict, docker_labels_wildcard_api_dict) if traefik_role_api_enabled- else docker_labels_traefik_main }}"+ else docker_labels_traefik_main_dict) | combine(docker_labels_wildcard_dict) }}" docker_labels_common: "{{ docker_labels_saltbox | combine(docker_labels_traefik) if traefik_role_enabled@@ -466,8 +503,8 @@ docker_networks_name_common: saltbox -docker_networks_alias_template: "{{ (lookup('vars', role_name + '_docker_networks_alias') | split(' '))- + (lookup('vars', lookup('vars', role_name + '_name', default=role_name) + '_docker_networks_alias_custom', default=[])) }}"+docker_networks_alias_template: "{{ [lookup('role_var', '_docker_networks_alias')]+ + lookup('role_var', '_docker_networks_alias_custom', default=[]) }}" docker_networks_common: - name: "{{ docker_networks_name_common }}"@@ -486,14 +523,14 @@ docker_log_driver_json: "json-file" docker_log_driver: "{{ docker_log_driver_json- if docker.json_driver+ if (docker.json_driver | bool) else 'default' }}" docker_log_options_json: tag: "{% raw %}'{{.ImageName}}|{{.Name}}|{{.ImageFullID}}|{{.FullID}}'{% endraw %}" # noqa jinja[spacing] docker_log_options: "{{ docker_log_options_json- if docker.json_driver+ if (docker.json_driver | bool) else 'default' }}" docker_network_container_health_delay: 5@@ -511,8 +548,11 @@ ################################ reboot_is_necessary: false-skip_dns: "{{ not (dns.ipv4 or dns.ipv6) }}"-role_dns_enabled: "{{ lookup('vars', role_name + '_dns_enabled', default=true) | bool }}"+dns_proxied: "{{ dns.proxied | bool }}"+skip_dns: "{{ not (dns_ipv4_enabled or dns_ipv6_enabled) }}"+role_dns_enabled: "{{ lookup('role_var', '_dns_enabled', default=true) | bool }}"+dns_ipv4_enabled: "{{ dns.ipv4 | bool }}"+dns_ipv6_enabled: "{{ dns.ipv6 | bool }}" docker_legacy_volume: false ansible_retry_count: "2" ansible_retry_count_ci: "5"@@ -524,15 +564,23 @@ backup_rclone_transfers: 4 
backup_rclone_drive_chunk_size: 128M backup_rclone_dropbox_chunk_size: 128M+backup_local_enabled: "{{ backup.local.enable | bool }}"+backup_rclone_enabled: "{{ backup.rclone.enable | bool }}"+backup_rsync_enabled: "{{ backup.rsync.enable | bool }}"+backup_snapshot_enabled: "{{ backup.misc.snapshot | bool }}" reverse_proxy_apps: - traefik -torrent_apps:- - deluge- - delugevpn- - qbittorrent- - qbittorrentvpn- - rutorrent- - transmission- - transmissionvpn+torrent_apps_saltbox:+ - "{{ deluge_instances | default('deluge') }}"+ - "{{ qbittorrent_instances | default('qbittorrent') }}"+ - "{{ rutorrent_name | default('rutorrent') }}"++torrent_apps_sandbox:+ - "{{ delugevpn_name | default('delugevpn') }}"+ - "{{ qbittorrentvpn_instances | default('qbittorrentvpn') }}"+ - "{{ transmission_instances | default('transmission') }}"+ - "{{ transmissionvpn_name | default('transmissionvpn') }}"++torrent_apps: "{{ (torrent_apps_saltbox + torrent_apps_sandbox) | flatten | unique | sort }}"
modified
library/find_open_port.py
@@ -2,7 +2,6 @@ from ansible.module_utils.basic import AnsibleModule import subprocess-import json DOCUMENTATION = """ ---@@ -36,6 +35,10 @@ def find_port(module, low_bound, high_bound, protocol): try:+ if low_bound < 1:+ module.fail_json(msg="Low bound must be at least 1")+ if high_bound > 65535:+ module.fail_json(msg="High bound must be at most 65535") if high_bound <= low_bound: module.fail_json(msg="High bound must be higher than low bound") @@ -46,14 +49,17 @@ if protocol == 'tcp': cmd = "ss -Htan" awk_cmd = "awk '{print $4}'"+ state_filter = "grep LISTEN" elif protocol == 'udp': cmd = "ss -Huan" awk_cmd = "awk '{print $4}'"+ state_filter = "grep UNCONN" else: # both cmd = "ss -Htuan" awk_cmd = "awk '{print $5}'"+ state_filter = "grep -E 'LISTEN|UNCONN'" - cmd += " | grep LISTEN | " + awk_cmd + " | grep -Eo '[0-9]+$' | sort -u"+ cmd += " | " + state_filter + " | " + awk_cmd + " | grep -Eo '[0-9]+$' | sort -u" # Run command to get ports in use ports_in_use = subprocess.check_output(cmd, shell=True)@@ -67,10 +73,12 @@ candidate = min(available_ports) return False, {"port": candidate} else:- return False, {"msg": "No available port found in the specified range"}+ return True, {"msg": "No available port found in the specified range"} - except Exception as e:- module.fail_json(msg=str(e))+ except subprocess.CalledProcessError as e:+ module.fail_json(msg=f"Failed to execute ss command: {e}")+ except ValueError as e:+ module.fail_json(msg=f"Failed to parse port numbers: {e}") def main(): module = AnsibleModule(
modified
library/migrate_folder.py
@@ -105,12 +105,13 @@ import grp import stat import traceback+from typing import Any, Optional, Tuple from ansible.module_utils.basic import AnsibleModule-from ansible.module_utils.pycompat24 import get_exception+ # Helper to safely get UID/GID-def get_id_info(module, owner=None, group=None):+def get_id_info(module: AnsibleModule, owner: Optional[str] = None, group: Optional[str] = None) -> Tuple[int, int]: uid = -1 gid = -1 if owner is not None:@@ -126,21 +127,23 @@ return uid, gid # Helper to validate and convert mode-def validate_mode(module, mode_str):+def validate_mode(module: AnsibleModule, mode_str: Optional[str]) -> Optional[int]: if mode_str is None: return None try: # Ensure it's treated as octal- if not isinstance(mode_str, str):- mode_str = str(mode_str)- if not mode_str.startswith('0'):- mode_str = '0' + mode_str # Ensure octal interpretation for int()- return int(mode_str, 8)+ mode_value = mode_str+ if not isinstance(mode_value, str):+ mode_value = str(mode_value)+ if not mode_value.startswith('0'):+ mode_value = '0' + mode_value # Ensure octal interpretation for int()+ return int(mode_value, 8) except (ValueError, TypeError): module.fail_json(msg=f"Invalid mode '{mode_str}' specified. 
Must be an octal number string (e.g., '0775').")---def run_module():+ return None # This line is unreachable but satisfies type checker+++def run_module() -> None: module_args = dict( legacy_path=dict(type='str', required=True), new_path=dict(type='str', required=True),@@ -150,7 +153,7 @@ recurse=dict(type='bool', required=False, default=False) ) - result = dict(+ result: dict[str, Any] = dict( changed=False, moved=False, created=False,@@ -181,18 +184,15 @@ # Check path statuses and types legacy_exists = os.path.lexists(legacy_path) new_exists = os.path.lexists(new_path)- legacy_is_dir = False- new_is_dir = False-- if legacy_exists:- if not os.path.isdir(legacy_path):- module.fail_json(msg=f"Legacy path '{legacy_path}' exists but is not a directory.")- legacy_is_dir = True-- if new_exists:- if not os.path.isdir(new_path):- module.fail_json(msg=f"New path '{new_path}' exists but is not a directory.")- new_is_dir = True++ if legacy_exists and not os.path.isdir(legacy_path):+ module.fail_json(msg=f"Legacy path '{legacy_path}' exists but is not a directory.")++ if new_exists and not os.path.isdir(new_path):+ module.fail_json(msg=f"New path '{new_path}' exists but is not a directory.")++ legacy_is_dir = legacy_exists and os.path.isdir(legacy_path)+ new_is_dir = new_exists and os.path.isdir(new_path) # --- Check Mode Early Exit --- if module.check_mode:@@ -241,20 +241,19 @@ try: # Ensure all parent directories exist before moving parent_dir = os.path.dirname(new_path)- created_dirs = []- + if parent_dir and not os.path.exists(parent_dir): # Find which directories we'll need to create path_to_create = parent_dir- dirs_to_create = []- + dirs_to_create: list[str] = []+ while path_to_create and path_to_create != '/' and path_to_create != '' and not os.path.exists(path_to_create): dirs_to_create.append(path_to_create) path_to_create = os.path.dirname(path_to_create)- + # Create parent directories os.makedirs(parent_dir, exist_ok=True)- + # Apply ownership and permissions only 
to directories we just created if (owner is not None or group is not None or mode_int is not None) and dirs_to_create: for created_dir in dirs_to_create:@@ -262,7 +261,7 @@ try: if owner is not None or group is not None: current_stat = os.stat(created_dir)- os.chown(created_dir, + os.chown(created_dir, uid if uid != -1 else current_stat.st_uid, gid if gid != -1 else current_stat.st_gid) if mode_int is not None:@@ -349,7 +348,7 @@ # --- Exit --- module.exit_json(**result) -def main():+def main() -> None: run_module() if __name__ == '__main__':
modified
library/qbittorrent_passwd.py
@@ -1,40 +1,123 @@ #!/usr/bin/python-from ansible.module_utils.basic import AnsibleModule+# -*- coding: utf-8 -*-++from __future__ import absolute_import, division, print_function+__metaclass__ = type++DOCUMENTATION = r'''+---+module: qbittorrent_password_hash+short_description: Generates a password hash compatible with qBittorrent.+description:+ - Takes a plain text password and generates a salted hash using the PBKDF2-HMAC-SHA512 algorithm.+ - Uses 100,000 iterations and a 16-byte random salt, matching qBittorrent's expected format.+ - The output format is "@ByteArray(SALT_BASE64:HASH_BASE64)".+ - This module is useful for generating password hashes to be placed in qBittorrent configuration files non-interactively.+ - The input password parameter has `no_log=True` set for security.+options:+ password:+ description: The plain text password to hash.+ type: str+ required: true+ no_log: true+author:+ - Salty+'''++EXAMPLES = r'''+- name: Generate qBittorrent password hash+ qbittorrent_passwd:+ password: "supersecretpassword"+ register: qbit_hash_result++- name: Display the generated hash+ debug:+ var: qbit_hash_result.hash+'''++RETURN = r'''+hash:+ description: The generated qBittorrent-compatible password hash string.+ type: str+ returned: on success+ sample: "@ByteArray(aBcDeFgHiJkLmNoPqRsTuVw==:xYz123AbCdEfGhIjKlMnOpQrStUvWxYz12/abc+def=)"+changed:+ description: Indicates if any state was changed. 
Always false for this module.+ type: bool+ returned: always+ sample: false+'''+ import base64 import hashlib import os+from typing import Any+from ansible.module_utils.basic import AnsibleModule -def qbittorrent_passwd(plain_passwd):++def generate_qbittorrent_hash(plain_passwd: str) -> str:+ """+ Generates a qBittorrent compatible password hash (PBKDF2-HMAC-SHA512).+ """+ ITERATIONS = 100_000 # Standard iteration count for qBittorrent+ SALT_SIZE = 16 # Standard salt size (bytes)+ try:- ITERATIONS = 100_000- SALT_SIZE = 16+ salt = os.urandom(SALT_SIZE)+ # Ensure password is bytes for hashing+ password_bytes = plain_passwd.encode()+ + # Generate the hash+ derived_key = hashlib.pbkdf2_hmac(+ hash_name='sha512',+ password=password_bytes,+ salt=salt,+ iterations=ITERATIONS+ )+ + # Encode salt and hash in Base64+ salt_b64 = base64.b64encode(salt).decode()+ hash_b64 = base64.b64encode(derived_key).decode()+ + # Format according to qBittorrent's expectation+ return f"@ByteArray({salt_b64}:{hash_b64})" - salt = os.urandom(SALT_SIZE)- h = hashlib.pbkdf2_hmac("sha512", plain_passwd.encode(), salt, ITERATIONS)- return "@ByteArray({}:{})".format(base64.b64encode(salt).decode(), base64.b64encode(h).decode()) except Exception as e:- raise ValueError(f"Error generating password hash: {str(e)}")+ # Wrap underlying exception for better debugging upstream+ raise ValueError(f"Error generating password hash: {str(e)}") from e -def main():++def main() -> None: module_args = dict( password=dict(type='str', required=True, no_log=True) ) - result = dict(- changed=False,- msg=''- )+ result: dict[str, Any] = {+ 'changed': False,+ 'hash': '',+ } module = AnsibleModule( argument_spec=module_args,- supports_check_mode=True+ supports_check_mode=False ) + plain_password: str = module.params['password']+ try:- result['msg'] = qbittorrent_passwd(module.params['password'])+ # Generate the hash using the dedicated function+ generated_hash = generate_qbittorrent_hash(plain_password)+ result['hash'] = 
generated_hash+ except ValueError as err:- module.fail_json(msg=str(err))+ # If the hashing function raised ValueError, fail the module+ module.fail_json(msg=f"Failed to generate qBittorrent hash: {str(err)}", **result)+ except Exception as e:+ # Catch any other unexpected errors during execution+ module.fail_json(msg=f"An unexpected error occurred: {str(e)}", **result) ++ # Exit successfully, returning the hash module.exit_json(**result) if __name__ == '__main__':
modified
library/saltbox_facts.py
@@ -18,6 +18,7 @@ owner: user1 group: group1 mode: "0640"+ base_path: "{{ server_appdata_path }}" register: register_var # Save facts with overwrite (ignores existing values)@@ -28,6 +29,7 @@ keys: key1: value1 key2: value2+ base_path: "{{ server_appdata_path }}" overwrite: true register: register_var @@ -41,6 +43,7 @@ keys: key1: "" key2: ""+ base_path: "{{ server_appdata_path }}" # Delete entire instance - name: Delete instance@@ -49,6 +52,7 @@ instance: instance1 method: delete delete_type: instance+ base_path: "{{ server_appdata_path }}" # Delete entire role (removes configuration file) - name: Delete role@@ -57,6 +61,7 @@ instance: instance1 method: delete delete_type: role+ base_path: "{{ server_appdata_path }}" # Save with default owner/group (current user) - name: Save facts with defaults@@ -65,6 +70,7 @@ instance: instance1 keys: key1: value1+ base_path: "{{ server_appdata_path }}" register: register_var # Save with specific file permissions@@ -75,6 +81,7 @@ keys: key1: value1 mode: "0600"+ base_path: "{{ server_appdata_path }}" register: register_var Return Values:@@ -90,22 +97,22 @@ description: Informational or error message type: str returned: when applicable- warnings:- description: List of warning messages- type: list- returned: when applicable """ -from ansible.module_utils.basic import AnsibleModule+from __future__ import annotations+ import configparser+import grp import os import pwd-import grp+import shutil import tempfile-import shutil from io import StringIO--def validate_instance_name(instance):+from typing import Any++from ansible.module_utils.basic import AnsibleModule++def validate_instance_name(instance: Any) -> None: """ Validate that the instance name is a string. @@ -118,7 +125,7 @@ if not isinstance(instance, str): raise ValueError("Instance name must be a string") -def validate_keys(keys):+def validate_keys(keys: Any) -> None: """ Validate configuration keys and values. 
@@ -130,7 +137,7 @@ """ if not isinstance(keys, dict): raise ValueError("Keys must be a dictionary")- + for key, value in keys.items(): if not isinstance(key, str): raise ValueError(f"Invalid key '{key}': must be a string")@@ -139,12 +146,13 @@ f"Invalid value type for key '{key}': must be string, number, or boolean" ) -def get_file_path(role):+def get_file_path(role: str, base_path: str) -> str: """ Get the configuration file path for a role. Args: role (str): Name of the role+ base_path (str): Base directory path Returns: str: Full path to the configuration file@@ -154,9 +162,9 @@ """ if not isinstance(role, str): raise ValueError("Role name must be a string")- return f"/opt/saltbox/{role}.ini"--def atomic_write(file_path, content, mode, owner, group):+ return f"{base_path}/saltbox/{role}.ini"++def atomic_write(file_path: str, content: str, mode: int, owner: str, group: str) -> None: """ Write content to file atomically with proper permissions. @@ -173,24 +181,24 @@ """ directory = os.path.dirname(file_path) os.makedirs(directory, exist_ok=True)- + temp_fd, temp_path = tempfile.mkstemp(dir=directory) try: with os.fdopen(temp_fd, 'w') as temp_file: temp_file.write(content)- + os.chmod(temp_path, mode)- os.chown(temp_path, + os.chown(temp_path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)- + shutil.move(temp_path, file_path) except Exception: if os.path.exists(temp_path): os.unlink(temp_path) raise -def load_existing_facts(file_path, instance):+def load_existing_facts(file_path: str, instance: str) -> dict[str, str]: """ Load existing facts from configuration file for a specific instance. 
@@ -206,7 +214,7 @@ """ try: validate_instance_name(instance)- + config = configparser.ConfigParser( interpolation=None, comment_prefixes=('#',),@@ -215,11 +223,11 @@ delimiters=('=',), empty_lines_in_values=False )- - config.optionxform = str- - existing_facts = {}- ++ config.optionxform = lambda optionstr: optionstr # Preserve case sensitivity for config keys++ existing_facts: dict[str, str] = {}+ if os.path.exists(file_path): config.read(file_path) if config.has_section(instance):@@ -228,15 +236,15 @@ value = config.get(instance, key) if value != 'None': existing_facts[key] = value- + return existing_facts- + except configparser.Error as e: raise Exception(f"Configuration parsing error: {str(e)}") except Exception as e: raise Exception(f"Unexpected error: {str(e)}") -def process_facts(file_path, instance, keys, owner, group, mode, overwrite=False):+def process_facts(file_path: str, instance: str, keys: dict[str, Any], owner: str, group: str, mode: int, overwrite: bool = False) -> tuple[dict[str, str], bool]: """ Process facts by loading existing values and saving new ones as needed. 
@@ -258,14 +266,14 @@ try: validate_instance_name(instance) validate_keys(keys)- + # Load existing facts first existing_facts = load_existing_facts(file_path, instance)- + # Determine final facts based on overwrite setting- final_facts = {}- keys_to_save = {}- + final_facts: dict[str, str] = {}+ keys_to_save: dict[str, str] = {}+ if overwrite: # Overwrite mode: use provided keys, keep existing keys not in provided keys final_facts.update(existing_facts)@@ -275,16 +283,16 @@ # Default mode: keep existing values, only add new keys final_facts.update({k: str(v) for k, v in keys.items()}) final_facts.update(existing_facts) # Existing values override new ones- + # Only save keys that don't exist yet for key, value in keys.items(): if key not in existing_facts: keys_to_save[key] = str(value)- + # If no new keys to save, return existing facts without changes if not keys_to_save: return final_facts, False- + # Save new/updated keys config = configparser.ConfigParser( interpolation=None,@@ -294,21 +302,21 @@ delimiters=('=',), empty_lines_in_values=False )- - config.optionxform = str- ++ config.optionxform = lambda optionstr: optionstr # Preserve case sensitivity for config keys+ if os.path.exists(file_path): config.read(file_path) changed = False- + if not config.has_section(instance): config.add_section(instance) changed = True for key, value in keys_to_save.items():- if (not config.has_section(instance) or - not config.has_option(instance, key) or + if (not config.has_section(instance) or+ not config.has_option(instance, key) or config.get(instance, key) != str(value)): config.set(instance, key, str(value)) changed = True@@ -317,11 +325,11 @@ with StringIO() as string_buffer: config.write(string_buffer) config_str = string_buffer.getvalue()- + atomic_write(file_path, config_str, mode, owner, group) return final_facts, changed- + except (OSError, IOError) as e: raise Exception(f"File operation error: {str(e)}") except configparser.Error as e:@@ -329,7 +337,7 @@ except 
Exception as e: raise Exception(f"Unexpected error: {str(e)}") -def delete_facts(file_path, delete_type, instance, keys):+def delete_facts(file_path: str, delete_type: str, instance: str, keys: dict[str, Any]) -> bool: """ Delete facts from configuration file. @@ -356,7 +364,7 @@ return False config = configparser.ConfigParser(interpolation=None)- config.optionxform = str+ config.optionxform = lambda optionstr: optionstr # Preserve case sensitivity for config keys config.read(file_path) changed = False @@ -374,14 +382,14 @@ with StringIO() as string_buffer: config.write(string_buffer) config_str = string_buffer.getvalue()- + stat = os.stat(file_path)- atomic_write(file_path, config_str, stat.st_mode, + atomic_write(file_path, config_str, stat.st_mode, pwd.getpwuid(stat.st_uid).pw_name, grp.getgrgid(stat.st_gid).gr_name) return changed- + except (OSError, IOError) as e: raise Exception(f"File operation error: {str(e)}") except configparser.Error as e:@@ -389,7 +397,7 @@ except Exception as e: raise Exception(f"Unexpected error: {str(e)}") -def parse_mode(mode):+def parse_mode(mode: Any) -> int: """ Parse and validate file mode. @@ -413,7 +421,7 @@ else: raise ValueError("Mode must be a quoted octal number starting with '0' (e.g., '0640').") -def get_current_user():+def get_current_user() -> str: """ Get current user name. @@ -422,7 +430,7 @@ """ return pwd.getpwuid(os.getuid()).pw_name -def run_module():+def run_module() -> None: """ Main module execution. 
@@ -440,6 +448,7 @@ - group (str): File group (default: current user) - mode (str): File mode in octal string format (default: '0640') - overwrite (bool): If True, overwrite existing values; if False, keep existing (default: False)+ - base_path (str): Base directory path for storing configuration files (required) """ module_args = dict( role=dict(type='str', required=True),@@ -450,14 +459,14 @@ owner=dict(type='str', required=False), group=dict(type='str', required=False), mode=dict(type='str', required=False, default='0640'),- overwrite=dict(type='bool', required=False, default=False)+ overwrite=dict(type='bool', required=False, default=False),+ base_path=dict(type='str', required=True) ) result = dict( changed=False, message='',- facts={},- warnings=[]+ facts={} ) module = AnsibleModule(@@ -466,19 +475,20 @@ ) try:- role = module.params['role']- instance = module.params['instance']- method = module.params.get('method')- keys = module.params['keys']- delete_type = module.params.get('delete_type')- overwrite = module.params['overwrite']- + role: str = module.params['role']+ instance: str = module.params['instance']+ method: str | None = module.params.get('method')+ keys: dict[str, Any] = module.params['keys']+ delete_type: str | None = module.params.get('delete_type')+ overwrite: bool = module.params['overwrite']+ base_path: str = module.params['base_path']+ current_user = get_current_user()- owner = module.params.get('owner') or current_user- group = module.params.get('group') or current_user+ owner: str = module.params.get('owner') or current_user+ group: str = module.params.get('group') or current_user mode = parse_mode(module.params['mode'])- file_path = get_file_path(role)+ file_path = get_file_path(role, base_path) if method == 'delete': if not delete_type:@@ -495,7 +505,7 @@ except Exception as e: module.fail_json(msg=str(e)) -def main():+def main() -> None: """ Module entry point. """
modified
requirements/requirements-saltbox.txt
@@ -1,8 +1,10 @@-ansible==11.11.0-ansible-lint==25.9.2+aiohttp==3.13.2+ansible==13.0.0+ansible-lint==25.11.0 apprise==1.9.5 argon2_cffi==25.1.0 certbot==5.1.0+cloudflare==4.3.1 dnspython==2.8.0 docker==7.1.0 jinja2==3.1.6@@ -12,9 +14,10 @@ netaddr==1.3.0 passlib==1.7.4 pexpect==4.9.0+psycopg[binary]==3.2.12 PyMySQL==1.1.2 pyOpenSSL==25.3.0 requests==2.32.5 ruamel.yaml==0.18.16 tld==0.13.1-uv==0.9.6+uv==0.9.10
modified
resources/roles/dns/defaults/main.yml
@@ -8,12 +8,3 @@ ######################################################################### --- cloudflare_allow_nested_proxy: false-cloudflare_reinstall: false-cloudflare_path: "/srv/cloudflare-helper"-cloudflare_venv_path: "{{ cloudflare_path }}/venv"-cloudflare_python_version: "3.10"-cloudflare_script_path: "{{ cloudflare_path }}/fetch_cloudflare_records.py"-cloudflare_requirements_path: "{{ cloudflare_path }}/requirements.txt"-cloudflare_files:- - "fetch_cloudflare_records.py"- - "requirements.txt"
modified
resources/roles/dns/tasks/cloudflare/main.yml
@@ -7,39 +7,26 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Cloudflare | Cleanup legacy folder+- name: Cloudflare | Cleanup legacy folders ansible.builtin.file:- path: "/srv/cloudflare"+ path: "{{ item }}" state: absent+ loop:+ - "/srv/cloudflare"+ - "/srv/cloudflare-helper" -- name: Cloudflare | Check if '{{ cloudflare_path }}' folder exists- ansible.builtin.stat:- path: "{{ cloudflare_path }}"- register: cloudflare_folder--- name: Cloudflare | Check if Python symlink exists- ansible.builtin.stat:- path: "{{ cloudflare_path }}/venv/bin/python{{ cloudflare_python_version }}"- register: cloudflare_folder_symlink--- name: Cloudflare | Python Migration- ansible.builtin.include_tasks: "subtasks/python.yml"- when: cloudflare_folder_symlink.stat.exists--- name: Cloudflare | Checksum- ansible.builtin.include_tasks: "subtasks/checksum.yml"--- name: Cloudflare | Setup- ansible.builtin.include_tasks: "subtasks/setup.yml"- when: (not cloudflare_folder.stat.exists) or (not cloudflare_folder_symlink.stat.exists) or (cloudflare_reinstall | bool)+- name: Set IP variables+ ansible.builtin.set_fact:+ ipv4_address_public_template: "{{ lookup('vars', ansible_parent_role_names | first + '_dns_ipv4', default=lookup('vars', ansible_parent_role_names | first + '_role_dns_ipv4', default=ip_address_public)) }}"+ ipv6_address_public_template: "{{ lookup('vars', ansible_parent_role_names | first + '_dns_ipv6', default=lookup('vars', ansible_parent_role_names | first + '_role_dns_ipv6', default=ipv6_address_public)) }}" - name: Cloudflare | Tailscale block- when: traefik_tailscale_enabled and lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=false)+ when: traefik_tailscale_enabled and lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=lookup('vars', ansible_parent_role_names | first + '_role_traefik_tailscale_enabled', default=false)) 
block: - name: Get Tailscale IPs ansible.builtin.shell: "tailscale ip" register: dns_tailscale_ips- failed_when: dns_tailscale_ips.rc != 0+ failed_when: (dns_tailscale_ips.rc != 0) - name: Set Tailscale variables ansible.builtin.set_fact:@@ -48,8 +35,8 @@ - name: Cloudflare | Add DNS Record ansible.builtin.include_tasks: "subtasks/add_dns_record.yml"- when: dns_action != 'remove'+ when: (dns_action != 'remove') - name: Cloudflare | Remove DNS Record ansible.builtin.include_tasks: "subtasks/remove_dns_record.yml"- when: dns_action == 'remove'+ when: (dns_action == 'remove')
modified
resources/roles/dns/tasks/cloudflare/subtasks/add_dns_record.yml
@@ -15,18 +15,16 @@ when: (not cloudflare_allow_nested_proxy) and ("." in dns_record) and dns_proxy - name: Cloudflare | Add DNS Record | Fetch Record information- ansible.builtin.shell: "{{ cloudflare_path }}/venv/bin/python3 {{ cloudflare_script_path }} --auth_key '{{ cloudflare.api }}' --auth_email '{{ cloudflare.email }}' --zone_name '{{ dns_zone }}' --record '{{ dns_record }}.{{ dns_zone }}'"+ cloudflare_dns_records:+ auth_email: "{{ cloudflare.email }}"+ auth_key: "{{ cloudflare.api }}"+ zone_name: "{{ dns_zone }}"+ record: "{{ dns_record }}.{{ dns_zone }}" register: cloudflare_record- ignore_errors: true -- name: Cloudflare | Add DNS Record | Print Failure Output- ansible.builtin.fail:- msg: "{{ cloudflare_record.stderr }}"- when: cloudflare_record.rc != 0--- name: Cloudflare | Add DNS Record | Convert JSON to Dictionary+- name: Cloudflare | Add DNS Record | Set variables ansible.builtin.set_fact:- cloudflare_record_dict: "{{ cloudflare_record.stdout | from_json }}"+ cloudflare_record_dict: "{{ cloudflare_record.records }}" cloudflare_ipv4_record: [] cloudflare_ipv6_record: [] @@ -45,7 +43,7 @@ label: "{{ item.name }} - {{ item.type }}" - name: Cloudflare | Add DNS Record | Tasks on success- when: cloudflare_dns_record_removal_status is succeeded and cloudflare_dns_record_removal_status.changed+ when: (cloudflare_dns_record_removal_status is succeeded) and cloudflare_dns_record_removal_status.changed block: - name: Cloudflare | Add DNS Record | Set 'dns_record_print' variable ansible.builtin.set_fact:@@ -73,20 +71,20 @@ - name: Cloudflare | Add DNS Record | IPv4 Tasks ansible.builtin.include_tasks: "add_dns_record/ipv4_enabled.yml"- when: dns.ipv4 and- ((not ((cloudflare_ipv4_record is defined and (cloudflare_ipv4_record | length > 0)) and (cloudflare_ipv4_record.content == ip_address_public) and (cloudflare_ipv4_record.proxied == dns_proxy)))- or not (cloudflare_ipv4_record is defined and (cloudflare_ipv4_record | length > 0)))+ when: dns_ipv4_enabled and+ 
((not (((cloudflare_ipv4_record is defined) and (cloudflare_ipv4_record | length > 0)) and (cloudflare_ipv4_record.content == ipv4_address_public_template) and (cloudflare_ipv4_record.proxied == dns_proxy)))+ or not ((cloudflare_ipv4_record is defined) and (cloudflare_ipv4_record | length > 0))) - name: Cloudflare | Add DNS Record | IPv4 Disable Tasks ansible.builtin.include_tasks: "add_dns_record/ipv4_disabled.yml"- when: (not dns.ipv4) and (cloudflare_ipv4_record is defined and (cloudflare_ipv4_record | length > 0))+ when: (not dns_ipv4_enabled) and ((cloudflare_ipv4_record is defined) and (cloudflare_ipv4_record | length > 0)) - name: Cloudflare | Add DNS Record | IPv6 Enabled Block ansible.builtin.include_tasks: "add_dns_record/ipv6_enabled.yml"- when: dns.ipv6 and- ((not ((cloudflare_ipv6_record is defined and (cloudflare_ipv6_record | length > 0)) and (cloudflare_ipv6_record.content == ipv6_address_public) and (cloudflare_ipv6_record.proxied == dns_proxy)))- or not (cloudflare_ipv6_record is defined and (cloudflare_ipv6_record | length > 0)))+ when: dns_ipv6_enabled and+ ((not (((cloudflare_ipv6_record is defined) and (cloudflare_ipv6_record | length > 0)) and (cloudflare_ipv6_record.content == ipv6_address_public_template) and (cloudflare_ipv6_record.proxied == dns_proxy)))+ or not ((cloudflare_ipv6_record is defined) and (cloudflare_ipv6_record | length > 0))) - name: Cloudflare | Add DNS Record | IPv6 Disable Block ansible.builtin.include_tasks: "add_dns_record/ipv6_disabled.yml"- when: (not dns.ipv6) and (cloudflare_ipv6_record is defined and (cloudflare_ipv6_record | length > 0))+ when: (not dns_ipv6_enabled) and ((cloudflare_ipv6_record is defined) and (cloudflare_ipv6_record | length > 0))
modified
resources/roles/dns/tasks/cloudflare/subtasks/add_dns_record/ipv4_enabled.yml
@@ -10,7 +10,7 @@ - name: Cloudflare | Add DNS Record | IPv4 | Validate IP variable ansible.builtin.fail: msg: "{{ ip_address_public_error }}"- when: (not ip_address_public_is_valid)+ when: (not ip_address_public_is_valid) and (ipv4_address_public_template == ip_address_public) - name: Cloudflare | Add DNS Record | IPv4 | Add DNS Record community.general.cloudflare_dns:@@ -21,8 +21,8 @@ solo: true proxied: "{{ dns_proxy }}" type: A- value: "{{ ip_address_public- if not lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=false)+ value: "{{ ipv4_address_public_template+ if not lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=lookup('vars', ansible_parent_role_names | first + '_role_traefik_tailscale_enabled', default=false)) else dns_tailscale_ipv4 }}" record: "{{ dns_record }}" register: cloudflare_dns_record_creation_status@@ -36,4 +36,4 @@ - name: Cloudflare | Add DNS Record | IPv4 | Display DNS Record creation status ansible.builtin.debug:- msg: "DNS A Record for '{{ dns_record_print }}' set to '{{ ip_address_public }}' was added. Proxy: {{ dns_proxy }}"+ msg: "DNS A Record for '{{ dns_record_print }}' set to '{{ ipv4_address_public_template }}' was added. Proxy: {{ dns_proxy }}"
modified
resources/roles/dns/tasks/cloudflare/subtasks/add_dns_record/ipv6_enabled.yml
@@ -10,7 +10,7 @@ - name: Cloudflare | Add DNS Record | IPv6 | Validate IPv6 variable ansible.builtin.fail: msg: "{{ ipv6_address_public_error }}"- when: (not ipv6_address_public_is_valid)+ when: (not ipv6_address_public_is_valid) and (ipv6_address_public_template == ipv6_address_public) - name: Cloudflare | Add DNS Record | IPv6 | Add DNS Record community.general.cloudflare_dns:@@ -21,8 +21,8 @@ solo: true proxied: "{{ dns_proxy }}" type: AAAA- value: "{{ ipv6_address_public- if not lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=false)+ value: "{{ ipv6_address_public_template+ if (not lookup('vars', ansible_parent_role_names | first + '_traefik_tailscale_enabled', default=lookup('vars', ansible_parent_role_names | first + '_role_traefik_tailscale_enabled', default=false))) else dns_tailscale_ipv6 }}" record: "{{ dns_record }}" register: cloudflare_dns_v6_record_creation_status@@ -36,4 +36,4 @@ - name: Cloudflare | Add DNS Record | IPv6 | Display DNS Record creation status ansible.builtin.debug:- msg: "DNS AAAA Record for '{{ dns_record_print }}' set to '{{ ipv6_address_public }}' was added. Proxy: {{ dns_proxy }}"+ msg: "DNS AAAA Record for '{{ dns_record_print }}' set to '{{ ipv6_address_public_template }}' was added. Proxy: {{ dns_proxy }}"
modified
resources/roles/dns/tasks/cloudflare/subtasks/remove_dns_record.yml
@@ -18,7 +18,7 @@ register: cloudflare_dns_record_removal_status - name: Cloudflare | Remove DNS Record | IPv4 | Tasks on success- when: cloudflare_dns_record_removal_status is succeeded+ when: (cloudflare_dns_record_removal_status is succeeded) block: - name: Cloudflare | Remove DNS Record | IPv4 | Set 'dns_record_print' variable ansible.builtin.set_fact:@@ -39,7 +39,7 @@ register: cloudflare_dns_v6_record_removal_status - name: Cloudflare | Remove DNS Record | IPv6 | Tasks on success- when: cloudflare_dns_v6_record_removal_status is succeeded+ when: (cloudflare_dns_v6_record_removal_status is succeeded) block: - name: Cloudflare | Remove DNS Record | IPv6 | Set 'dns_record_print' variable ansible.builtin.set_fact:
modified
resources/tasks/directories/create_directories.yml
@@ -11,9 +11,9 @@ ansible.builtin.file: path: "{{ item }}" state: directory- owner: "{{ lookup('vars', role_name + '_paths_owner', default=user.name) }}"- group: "{{ lookup('vars', role_name + '_paths_group', default=user.name) }}"- mode: "{{ lookup('vars', role_name + '_paths_permissions', default='0775') }}"- recurse: "{{ lookup('vars', role_name + '_paths_recursive', default=false) }}"- with_items: "{{ lookup('vars', role_name + '_paths_folders_list')- + lookup('vars', role_name + '_paths_folders_list_custom', default=[]) }}"+ owner: "{{ lookup('role_var', '_paths_owner', default=user.name) }}"+ group: "{{ lookup('role_var', '_paths_group', default=user.name) }}"+ mode: "{{ lookup('role_var', '_paths_permissions', default='0775') }}"+ recurse: "{{ lookup('role_var', '_paths_recursive', default=false) }}"+ with_items: "{{ lookup('role_var', '_paths_folders_list')+ + lookup('role_var', '_paths_folders_list_custom', default=[]) }}"
modified
resources/tasks/dns/tasker2.yml
@@ -7,49 +7,18 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Resources | Tasks | DNS | Tasker | Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ dns_zone | default(user.domain) }}\", as_object=True); print(res.fld)"- register: fld--- name: Resources | Tasks | DNS | Tasker | Get subdomain- when: dns_record | length > 0- block:- - name: Resources | Tasks | DNS | Tasker | Set subdomain var- ansible.builtin.set_fact:- subdomain_block: 1-- - name: Resources | Tasks | DNS | Tasker | Get subdomain- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ dns_record }}.{{ dns_zone | default(user.domain) }}\", as_object=True); print(res.subdomain)"- register: subdomain_notempty--- name: Resources | Tasks | DNS | Tasker | Get subdomain- when: dns_record | length == 0- block:- - name: Resources | Tasks | DNS | Tasker | Set subdomain var- ansible.builtin.set_fact:- subdomain_block: 0-- - name: Resources | Tasks | DNS | Tasker | Get subdomain- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ dns_zone | default(user.domain) }}\", as_object=True); print(res.subdomain)"- register: subdomain_empty--- name: Resources | Tasks | DNS | Tasker | Set subdomain var- ansible.builtin.set_fact:- subdomain: "{{ subdomain_notempty.stdout- if (subdomain_block)- else subdomain_empty.stdout }}"+- name: Resources | Tasks | DNS | Tasker | Parse domain+ tld_parse:+ url: "{{ dns_zone | default(user.domain) }}"+ record: "{{ dns_record | default('') }}"+ register: domain_parsed - name: Resources | Tasks | DNS | Tasker | Set '_dns_*' variables ansible.builtin.set_fact:- _dns_tasker_zone: "{{ fld.stdout }}"- _dns_tasker_record: "{{ subdomain- if (subdomain | length > 0)- else '@' }}"+ _dns_tasker_zone: "{{ domain_parsed.fld }}"+ _dns_tasker_record: "{{ 
domain_parsed.record }}" _dns_tasker_action: "{{ dns_action | default('add') }}"- _dns_tasker_proxy: "{{ dns_proxy | default(dns.proxied) }}"+ _dns_tasker_proxy: "{{ dns_proxy | default(dns_proxied) }}" - name: Resources | Tasks | DNS | Tasker | Sent task to DNS Role ansible.builtin.include_role:
modified
resources/tasks/docker/create_docker_container.yml
@@ -13,126 +13,185 @@ if (var_prefix is defined) else role_name }}" +- name: Resources | Tasks | Docker | Create Docker Container | Set instance name+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"++- name: Resources | Tasks | Docker | Create Docker Container | Set variables that need omit handling+ ansible.builtin.set_fact:+ _docker_command: "{{ (lookup('docker_var', '_docker_commands', default=[]) | reject('equalto', omit) | list) }}"+ _docker_cpus: "{{ lookup('docker_var', '_docker_cpus', default=docker_cpus_default) }}"+ _docker_env: "{{ (nvidia_docker_env if use_nvidia else {}) | combine(lookup('docker_var', '_docker_envs', default={})) }}"+ _docker_etc_hosts: "{{ (docker_hosts_common if lookup('docker_var', '_docker_hosts_use_common', default=true) else {}) | combine(lookup('docker_var', '_docker_hosts', default={}))+ if not ('container:' in lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common))+ else {} }}"+ _docker_exposed_ports: "{{ (lookup('docker_var', '_docker_exposed_ports', default=[]) | unique | reject('equalto', omit) | list)+ if not ('container:' in lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common))+ else [] }}"+ _docker_hostname: "{{ lookup('docker_var', '_docker_hostname', default='')+ if (lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common) == docker_networks_name_common)+ else '' }}"+ _docker_labels: "{{ (docker_labels_common if lookup('docker_var', '_docker_labels_use_common', default=true) else {}) | combine(lookup('docker_var', '_docker_labels', default={})) }}"+ _docker_log_driver: "{{ docker_log_driver if (docker_log_driver != 'default') else lookup('docker_var', '_docker_log_driver', default='') }}"+ _docker_log_options: "{{ docker_log_options if (docker_log_options != 'default') else lookup('docker_var', '_docker_log_options', default={}) }}"+ _docker_memory: "{{ lookup('docker_var', 
'_docker_memory', default=docker_memory_default) }}"+ _docker_networks: "{{ lookup('docker_var', '_docker_networks', default=[])+ if not ((lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common) == 'host') or ('container' in lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common)))+ else [] }}"+ _docker_published_ports: "{{ (lookup('docker_var', '_docker_ports', default=[]) | unique | reject('equalto', omit) | list)+ if not ('container:' in lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common))+ else [] }}"+ _docker_volumes: "{{ ((docker_volumes_common + lookup('docker_var', '_docker_volumes', default=[]))+ if (lookup('docker_var', '_docker_volumes_global', default=true) | bool)+ else lookup('docker_var', '_docker_volumes', default=[])) | unique | reject('equalto', omit) | list }}"+ - name: Resources | Tasks | Docker | Create Docker Container | Network Container Health Status ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/network_container_health_status.yml"- when: ('container:' in lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common))+ when: ('container:' in lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common)) - name: Resources | Tasks | Docker | Create Docker Container | Create Docker Container # noqa args[module] community.docker.docker_container:- auto_remove: "{{ lookup('vars', _var_prefix + '_docker_auto_remove', default=omit) }}"- blkio_weight: "{{ lookup('vars', _var_prefix + '_docker_blkio_weight', default=omit) }}"- cap_drop: "{{ lookup('vars', _var_prefix + '_docker_cap_drop', default=omit) }}"- capabilities: "{{ lookup('vars', _var_prefix + '_docker_capabilities', default=omit) }}"- cgroup_parent: "{{ lookup('vars', _var_prefix + '_docker_cgroup_parent', default=omit) }}"- cleanup: "{{ lookup('vars', _var_prefix + '_docker_cleanup', default=omit) }}"- command: "{{ lookup('vars', _var_prefix 
+ '_docker_commands', default=omit) | reject('equalto', omit) | list }}"+ auto_remove: "{{ lookup('docker_var', '_docker_auto_remove', default=omit) }}"+ blkio_weight: "{{ lookup('docker_var', '_docker_blkio_weight', default=omit) }}"+ cap_drop: "{{ lookup('docker_var', '_docker_cap_drop', default=omit) }}"+ capabilities: "{{ lookup('docker_var', '_docker_capabilities', default=omit) }}"+ cgroup_parent: "{{ lookup('docker_var', '_docker_cgroup_parent', default=omit) }}"+ cgroupns_mode: "{{ lookup('docker_var', '_docker_cgroupns_mode', default=omit) }}"+ cleanup: "{{ lookup('docker_var', '_docker_cleanup', default=omit) }}"+ command: "{{ _docker_command+ if (_docker_command | length > 0)+ else omit }}" command_handling: compatibility container_default_behavior: compatibility- cpu_period: "{{ lookup('vars', _var_prefix + '_docker_cpu_period', default=omit) }}"- cpu_quota: "{{ lookup('vars', _var_prefix + '_docker_cpu_quota', default=omit) }}"- cpu_shares: "{{ lookup('vars', _var_prefix + '_docker_cpu_shares', default=omit) }}"- cpus: "{{ (docker_cpus_default is defined) | ternary(docker_cpus_default, lookup('vars', _var_prefix + '_docker_cpus', default=omit)) }}"- cpuset_cpus: "{{ lookup('vars', _var_prefix + '_docker_cpuset_cpus', default=omit) }}"- cpuset_mems: "{{ lookup('vars', _var_prefix + '_docker_cpuset_mems', default=omit) }}"+ cpu_period: "{{ lookup('docker_var', '_docker_cpu_period', default=omit) }}"+ cpu_quota: "{{ lookup('docker_var', '_docker_cpu_quota', default=omit) }}"+ cpu_shares: "{{ lookup('docker_var', '_docker_cpu_shares', default=omit) }}"+ cpus: "{{ _docker_cpus+ if (_docker_cpus | length > 0)+ else omit }}"+ cpuset_cpus: "{{ lookup('docker_var', '_docker_cpuset_cpus', default=omit) }}"+ cpuset_mems: "{{ lookup('docker_var', '_docker_cpuset_mems', default=omit) }}" default_host_ip: ""- device_read_bps: "{{ lookup('vars', _var_prefix + '_docker_device_read_bps', default=omit) }}"- device_read_iops: "{{ lookup('vars', _var_prefix + 
'_docker_device_read_iops', default=omit) }}"- device_requests: "{{ lookup('vars', _var_prefix + '_docker_device_requests', default=omit) }}"- device_write_bps: "{{ lookup('vars', _var_prefix + '_docker_device_write_bps', default=omit) }}"- device_write_iops: "{{ lookup('vars', _var_prefix + '_docker_device_write_iops', default=omit) }}"- devices: "{{ lookup('vars', _var_prefix + '_docker_devices', default=omit) }}"- dns_opts: "{{ lookup('vars', _var_prefix + '_docker_dns_opts', default=omit) }}"- dns_search_domains: "{{ lookup('vars', _var_prefix + '_docker_dns_search_domains', default=omit) }}"- dns_servers: "{{ lookup('vars', _var_prefix + '_docker_dns_servers', default=omit) }}"- domainname: "{{ lookup('vars', _var_prefix + '_docker_domainname', default=omit) }}"- entrypoint: "{{ lookup('vars', _var_prefix + '_docker_entrypoint', default=omit) }}"- env: "{{ (nvidia_docker_env if use_nvidia else {})- | combine(lookup('vars', _var_prefix + '_docker_envs', default={}))- | default(omit) }}"- env_file: "{{ lookup('vars', _var_prefix + '_docker_env_file', default=omit) }}"- etc_hosts: "{{ lookup('vars', _var_prefix + '_docker_hosts', default=omit)- if not ('container:' in lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common))- else {} }}"- exposed_ports: "{{ (lookup('vars', _var_prefix + '_docker_exposed_ports', default=[]) | unique | reject('equalto', omit) | list)- if not ('container:' in lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common))- else [] }}"- groups: "{{ lookup('vars', _var_prefix + '_docker_groups', default=omit) }}"- healthcheck: "{{ lookup('vars', _var_prefix + '_docker_healthcheck', default=omit) }}"- hostname: "{{ lookup('vars', _var_prefix + '_docker_hostname', default=omit)- if (lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common) == docker_networks_name_common)+ detach: true+ device_cgroup_rules: "{{ lookup('docker_var', 
'_docker_device_cgroup_rules', default=omit) }}"+ device_read_bps: "{{ lookup('docker_var', '_docker_device_read_bps', default=omit) }}"+ device_read_iops: "{{ lookup('docker_var', '_docker_device_read_iops', default=omit) }}"+ device_requests: "{{ lookup('docker_var', '_docker_device_requests', default=omit) }}"+ device_write_bps: "{{ lookup('docker_var', '_docker_device_write_bps', default=omit) }}"+ device_write_iops: "{{ lookup('docker_var', '_docker_device_write_iops', default=omit) }}"+ devices: "{{ lookup('docker_var', '_docker_devices', default=omit) }}"+ dns_opts: "{{ lookup('docker_var', '_docker_dns_opts', default=omit) }}"+ dns_search_domains: "{{ lookup('docker_var', '_docker_dns_search_domains', default=omit) }}"+ dns_servers: "{{ lookup('docker_var', '_docker_dns_servers', default=omit) }}"+ domainname: "{{ lookup('docker_var', '_docker_domainname', default=omit) }}"+ entrypoint: "{{ lookup('docker_var', '_docker_entrypoint', default=omit) }}"+ env: "{{ _docker_env+ if (_docker_env | length > 0)+ else omit }}"+ env_file: "{{ lookup('docker_var', '_docker_env_file', default=omit) }}"+ etc_hosts: "{{ _docker_etc_hosts+ if (_docker_etc_hosts | length > 0)+ else omit }}"+ exposed_ports: "{{ _docker_exposed_ports+ if (_docker_exposed_ports | length > 0)+ else omit }}"+ groups: "{{ lookup('docker_var', '_docker_groups', default=omit) }}"+ healthcheck: "{{ lookup('docker_var', '_docker_healthcheck', default=omit) }}"+ healthy_wait_timeout: "{{ lookup('docker_var', '_docker_healthy_wait_timeout', default=300) }}"+ hostname: "{{ _docker_hostname+ if (_docker_hostname | length > 0) else omit }}"- image: "{{ lookup('vars', _var_prefix + '_docker_image') }}"- init: "{{ lookup('vars', _var_prefix + '_docker_init', default=omit) }}"- ipc_mode: "{{ lookup('vars', _var_prefix + '_docker_ipc_mode', default=omit) }}"- keep_volumes: "{{ lookup('vars', _var_prefix + '_docker_keep_volumes', default=omit) }}"- kernel_memory: "{{ lookup('vars', _var_prefix + 
'_docker_kernel_memory', default=omit) }}"- labels: "{{ lookup('vars', _var_prefix + '_docker_labels', default=omit) }}"- links: "{{ lookup('vars', _var_prefix + '_docker_links', default=omit) }}"- log_driver: "{{ (docker_log_driver != 'default') | ternary(docker_log_driver, lookup('vars', _var_prefix + '_docker_log_driver', default=omit)) }}"- log_options: "{{ (docker_log_options != 'default') | ternary(docker_log_options, lookup('vars', _var_prefix + '_docker_log_options', default=omit)) }}"- memory: "{{ (docker_memory_default is defined) | ternary(docker_memory_default, lookup('vars', _var_prefix + '_docker_memory', default=omit)) }}"- memory_reservation: "{{ lookup('vars', _var_prefix + '_docker_memory_reservation', default=omit) }}"- memory_swap: "{{ lookup('vars', _var_prefix + '_docker_memory_swap', default=omit) }}"- memory_swappiness: "{{ lookup('vars', _var_prefix + '_docker_memory_swappiness', default=omit) }}"- mounts: "{{ lookup('vars', _var_prefix + '_docker_mounts', default=omit) }}"- name: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }}"- network_mode: "{{ lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common) }}"- networks: "{{ omit- if (lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common) == 'host') or ('container' in lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common))- else lookup('vars', _var_prefix + '_docker_networks') }}"+ image: "{{ lookup('docker_var', '_docker_image') }}"+ image_comparison: "desired-image"+ image_label_mismatch: "ignore"+ image_name_mismatch: "recreate"+ init: "{{ lookup('docker_var', '_docker_init', default=omit) }}"+ ipc_mode: "{{ lookup('docker_var', '_docker_ipc_mode', default=omit) }}"+ keep_volumes: "{{ lookup('docker_var', '_docker_keep_volumes', default=omit) }}"+ kernel_memory: "{{ lookup('docker_var', '_docker_kernel_memory', default=omit) }}"+ kill_signal: "{{ 
lookup('docker_var', '_docker_kill_signal', default=omit) }}"+ labels: "{{ _docker_labels+ if (_docker_labels | length > 0)+ else omit }}"+ links: "{{ lookup('docker_var', '_docker_links', default=omit) }}"+ log_driver: "{{ _docker_log_driver+ if (_docker_log_driver | length > 0)+ else omit }}"+ log_options: "{{ _docker_log_options+ if (_docker_log_options | length > 0)+ else omit }}"+ memory: "{{ _docker_memory+ if (_docker_memory | length > 0)+ else omit }}"+ memory_reservation: "{{ lookup('docker_var', '_docker_memory_reservation', default=omit) }}"+ memory_swap: "{{ lookup('docker_var', '_docker_memory_swap', default=omit) }}"+ memory_swappiness: "{{ lookup('docker_var', '_docker_memory_swappiness', default=omit) }}"+ mounts: "{{ lookup('docker_var', '_docker_mounts', default=omit) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_instance_name) }}"+ network_mode: "{{ lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common) }}"+ networks: "{{ _docker_networks+ if (_docker_networks | length > 0)+ else omit }}" networks_cli_compatible: true- oom_killer: "{{ lookup('vars', _var_prefix + '_docker_oom_killer', default=omit) }}"- oom_score_adj: "{{ lookup('vars', _var_prefix + '_docker_oom_score_adj', default=omit) }}"- output_logs: "{{ lookup('vars', _var_prefix + '_docker_output_logs', default=omit) }}"- paused: "{{ lookup('vars', _var_prefix + '_docker_paused', default=omit) }}"- pid_mode: "{{ lookup('vars', _var_prefix + '_docker_pid_mode', default=omit) }}"- privileged: "{{ lookup('vars', _var_prefix + '_docker_privileged', default=omit) }}"- published_ports: "{{ (lookup('vars', _var_prefix + '_docker_ports', default=[]) | unique | reject('equalto', omit) | list)- if not ('container:' in lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common))- else [] }}"- pull: "{{ lookup('vars', _var_prefix + '_docker_image_pull', default=true) | bool }}"- read_only: "{{ lookup('vars', _var_prefix + 
'_docker_read_only', default=omit) }}"- recreate: "{{ lookup('vars', _var_prefix + '_docker_recreate', default=omit) }}"- restart_policy: "{{ lookup('vars', _var_prefix + '_docker_restart_policy', default='unless-stopped') }}"- restart_retries: "{{ lookup('vars', _var_prefix + '_docker_restart_retries', default=omit) }}"- runtime: "{{ lookup('vars', _var_prefix + '_docker_runtime', default=omit) }}"- security_opts: "{{ lookup('vars', _var_prefix + '_docker_security_opts', default=omit) }}"- shm_size: "{{ lookup('vars', _var_prefix + '_docker_shm_size', default=omit) }}"+ oom_killer: "{{ lookup('docker_var', '_docker_oom_killer', default=omit) }}"+ oom_score_adj: "{{ lookup('docker_var', '_docker_oom_score_adj', default=omit) }}"+ output_logs: "{{ lookup('docker_var', '_docker_output_logs', default=omit) }}"+ paused: "{{ lookup('docker_var', '_docker_paused', default=omit) }}"+ pid_mode: "{{ lookup('docker_var', '_docker_pid_mode', default=omit) }}"+ privileged: "{{ lookup('docker_var', '_docker_privileged', default=omit) }}"+ published_ports: "{{ _docker_published_ports+ if (_docker_published_ports | length > 0)+ else omit }}"+ pull: "{{ lookup('docker_var', '_docker_image_pull', default=true) | bool }}"+ pull_check_mode_behavior: "image_not_present"+ read_only: "{{ lookup('docker_var', '_docker_read_only', default=omit) }}"+ recreate: "{{ lookup('docker_var', '_docker_recreate', default=omit) }}"+ restart_policy: "{{ lookup('docker_var', '_docker_restart_policy', default='unless-stopped') }}"+ restart_retries: "{{ lookup('docker_var', '_docker_restart_retries', default=omit) }}"+ runtime: "{{ lookup('docker_var', '_docker_runtime', default=omit) }}"+ security_opts: "{{ lookup('docker_var', '_docker_security_opts', default=omit) }}"+ shm_size: "{{ lookup('docker_var', '_docker_shm_size', default=omit) }}" state: started- stop_timeout: "{{ lookup('vars', _var_prefix + '_docker_stop_timeout', default='10') }}"- storage_opts: "{{ lookup('vars', _var_prefix + 
'_docker_storage_opts', default=omit) }}"- sysctls: "{{ lookup('vars', _var_prefix + '_docker_sysctls', default=omit) }}"+ stop_timeout: "{{ lookup('docker_var', '_docker_stop_timeout', default='10') }}"+ storage_opts: "{{ lookup('docker_var', '_docker_storage_opts', default=omit) }}"+ sysctls: "{{ lookup('docker_var', '_docker_sysctls', default=omit) }}" tls_hostname: localhost- tmpfs: "{{ lookup('vars', _var_prefix + '_docker_tmpfs', default=omit) }}"- ulimits: "{{ lookup('vars', _var_prefix + '_docker_ulimits', default=omit) }}"- user: "{{ lookup('vars', _var_prefix + '_docker_user', default=omit) }}"- userns_mode: "{{ lookup('vars', _var_prefix + '_docker_userns_mode', default=omit) }}"- uts: "{{ lookup('vars', _var_prefix + '_docker_uts', default=omit) }}"- volume_driver: "{{ lookup('vars', _var_prefix + '_docker_volume_driver', default=omit) }}"- volumes: "{{ ((docker_volumes_common + lookup('vars', _var_prefix + '_docker_volumes', default=omit))- if (lookup('vars', _var_prefix + '_docker_volumes_global', default=true) | bool)- else lookup('vars', _var_prefix + '_docker_volumes', default=omit)) | unique | reject('equalto', omit) | list }}"- volumes_from: "{{ lookup('vars', _var_prefix + '_docker_volumes_from', default=omit) }}"- working_dir: "{{ lookup('vars', _var_prefix + '_docker_working_dir', default=omit) }}"+ tmpfs: "{{ lookup('docker_var', '_docker_tmpfs', default=omit) }}"+ ulimits: "{{ lookup('docker_var', '_docker_ulimits', default=omit) }}"+ user: "{{ lookup('docker_var', '_docker_user', default=omit) }}"+ userns_mode: "{{ lookup('docker_var', '_docker_userns_mode', default=omit) }}"+ uts: "{{ lookup('docker_var', '_docker_uts', default=omit) }}"+ volume_driver: "{{ lookup('docker_var', '_docker_volume_driver', default=omit) }}"+ volumes: "{{ _docker_volumes+ if (_docker_volumes | length > 0)+ else omit }}"+ volumes_from: "{{ lookup('docker_var', '_docker_volumes_from', default=omit) }}"+ working_dir: "{{ lookup('docker_var', '_docker_working_dir', 
default=omit) }}" register: create_docker_result retries: "{{ ansible_retry_count if (not continuous_integration) else ansible_retry_count_ci }}"- timeout: "{{ 300+ timeout: "{{ lookup('docker_var', '_docker_create_timeout', default=120) if continuous_integration else omit }}" delay: 10- until: create_docker_result is succeeded+ until: (create_docker_result is succeeded) -- name: Resources | Tasks | Docker | Create Docker Container | Prune dangling images+- name: "Resources | Tasks | Docker | Create Docker Container | Wait for {{ docker_create_image_prune_delay_timeout }} seconds"+ ansible.builtin.wait_for:+ timeout: "{{ docker_create_image_prune_delay_timeout }}"+ when: docker_create_image_prune and docker_create_image_prune_delay++- name: Resources | Tasks | Docker | Create Docker Container | Prune images community.docker.docker_prune: images: true images_filters:- dangling: true+ until: 24h+ timeout: 120 when: docker_create_image_prune register: prune_images_result retries: "{{ ansible_retry_count if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: prune_images_result is succeeded+ until: (prune_images_result is succeeded)
modified
resources/tasks/docker/network_container_health_status.yml
@@ -11,17 +11,21 @@ ansible.builtin.wait_for: timeout: "{{ docker_network_container_health_delay }}" -- name: Resources | Tasks | Docker | Network Container Health Status | Check health of network linked container+- name: "Resources | Tasks | Docker | Network Container Health Status | Set instance name"+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"++- name: "Resources | Tasks | Docker | Network Container Health Status | Check health of network linked container" community.docker.docker_container_info:- name: "{{ lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }}"+ name: "{{ lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }}" register: docker_info -- name: Resources | Tasks | Docker | Network Container Health Status | Fail if network linked container does not exist+- name: "Resources | Tasks | Docker | Network Container Health Status | Fail if network linked container does not exist" ansible.builtin.fail:- msg: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }} is configured to use {{ lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }} for networking but it does not exist"- when: not docker_info.exists+ msg: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }} is configured to use {{ lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }} for networking but it does not exist"+ when: (not docker_info.exists) -- name: Resources | Tasks | Docker | Network Container Health Status | Fail if network linked container is not healthy+- name: "Resources | Tasks | Docker | Network Container Health Status | Fail if network linked container is not healthy" ansible.builtin.fail:- msg: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }} is configured 
to use {{ lookup('vars', _var_prefix + '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }} for networking but it is not healthy"- when: ('State' not in docker_info.container or 'Health' not in docker_info.container.State or 'Status' not in docker_info.container.State.Health or docker_info.container.State.Health.Status != 'healthy')+ msg: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }} is configured to use {{ lookup('docker_var', '_docker_network_mode', default=docker_networks_name_common).split(':')[1] }} for networking but it is not healthy"+ when: (('State' not in docker_info.container) or ('Health' not in docker_info.container.State) or ('Status' not in docker_info.container.State.Health) or (docker_info.container.State.Health.Status != 'healthy'))
modified
resources/tasks/docker/remove_docker_container.yml
@@ -13,18 +13,22 @@ if (var_prefix is defined) else role_name }}" +- name: Resources | Tasks | Docker | Remove Docker Container | Set instance name+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"+ - name: Resources | Tasks | Docker | Remove Docker Container | Remove Docker Container community.docker.docker_container: container_default_behavior: compatibility- force_kill: "{{ lookup('vars', _var_prefix + '_docker_force_kill', default=omit) }}"- kill_signal: "{{ lookup('vars', _var_prefix + '_docker_kill_signal', default=omit) }}"- name: "{{ var_prefix | default(lookup('vars', _var_prefix + '_docker_container', default=_var_prefix)) }}"+ force_kill: "{{ lookup('docker_var', '_docker_force_kill', default=omit) }}"+ kill_signal: "{{ lookup('docker_var', '_docker_kill_signal', default=omit) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }}" state: absent- stop_timeout: "{{ lookup('vars', _var_prefix + '_docker_stop_timeout', default='10') }}"+ stop_timeout: "{{ lookup('docker_var', '_docker_stop_timeout', default='180') }}" tls_hostname: localhost register: remove_docker_result retries: "{{ ansible_retry_count if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: remove_docker_result is succeeded+ until: (remove_docker_result is succeeded)
modified
resources/tasks/docker/restart_docker_container.yml
@@ -13,14 +13,18 @@ if (var_prefix is defined) else role_name }}" +- name: Resources | Tasks | Docker | Restart Docker Container | Set instance name+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"+ - name: Resources | Tasks | Docker | Restart Docker Container | Stop Docker Container community.docker.docker_container: container_default_behavior: compatibility- force_kill: "{{ lookup('vars', _var_prefix + '_docker_force_kill', default=omit) }}"- kill_signal: "{{ lookup('vars', _var_prefix + '_docker_kill_signal', default=omit) }}"- name: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }}"+ force_kill: "{{ lookup('docker_var', '_docker_force_kill', default=omit) }}"+ kill_signal: "{{ lookup('docker_var', '_docker_kill_signal', default=omit) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }}" state: stopped- stop_timeout: "{{ lookup('vars', _var_prefix + '_docker_stop_timeout', default='180') }}"+ stop_timeout: "{{ lookup('docker_var', '_docker_stop_timeout', default='180') }}" tls_hostname: localhost comparisons: '*': ignore@@ -29,12 +33,12 @@ if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: stop_docker_result is succeeded+ until: (stop_docker_result is succeeded) - name: Resources | Tasks | Docker | Restart Docker Container | Start Docker Container community.docker.docker_container: container_default_behavior: compatibility- name: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }}" state: started tls_hostname: localhost comparisons:@@ -44,4 +48,4 @@ if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: start_docker_result is succeeded+ until: (start_docker_result is succeeded)
modified
resources/tasks/docker/set_docker_devices_variable.yml
@@ -21,4 +21,4 @@ - name: Resources | Tasks | Docker | Set Docker Devices Variable | Set 'docker_devices' variable # noqa var-naming[no-jinja] ansible.builtin.set_fact:- "{{ docker_devices_var_name }}": "{{ ['/dev/dri:/dev/dri'] + lookup('vars', role_name + '_docker_devices_default', default=[]) }}"+ "{{ docker_devices_var_name }}": "{{ ['/dev/dri:/dev/dri'] + lookup('docker_var', '_docker_devices_default', default=[]) }}"
modified
resources/tasks/docker/start_docker_container.yml
@@ -13,10 +13,14 @@ if (var_prefix is defined) else role_name }}" +- name: Resources | Tasks | Docker | Start Docker Container | Set instance name+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"+ - name: Resources | Tasks | Docker | Start Docker Container | Start Docker Container community.docker.docker_container: container_default_behavior: compatibility- name: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }}" state: started tls_hostname: localhost comparisons:@@ -26,4 +30,4 @@ if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: start_docker_result is succeeded+ until: (start_docker_result is succeeded)
modified
resources/tasks/docker/start_saltbox_docker_containers.yml
@@ -12,8 +12,8 @@ - name: "Resources | Tasks | Docker | Start Saltbox Docker Containers | Get Docker service state" ansible.builtin.set_fact:- start_docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"- start_docker_containers_docker_controller_service_running: "{{ (services['saltbox_managed_docker_controller.service'] is defined) and (services['saltbox_managed_docker_controller.service']['state'] == 'running') }}"+ start_docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"+ start_docker_containers_docker_controller_service_running: "{{ (ansible_facts['services']['saltbox_managed_docker_controller.service'] is defined) and (ansible_facts['services']['saltbox_managed_docker_controller.service']['state'] == 'running') }}" - name: Resources | Tasks | Docker | Start Saltbox Docker Containers | Run Controller Tasks when: start_docker_service_running and start_docker_containers_docker_controller_service_running
modified
resources/tasks/docker/stop_docker_container.yml
@@ -13,14 +13,18 @@ if (var_prefix is defined) else role_name }}" +- name: Resources | Tasks | Docker | Stop Docker Container | Set instance name+ ansible.builtin.set_fact:+ _instance_name: "{{ lookup('vars', _var_prefix + '_name', default=_var_prefix) }}"+ - name: Resources | Tasks | Docker | Stop Docker Container | Stop Docker Container community.docker.docker_container: container_default_behavior: compatibility- force_kill: "{{ lookup('vars', _var_prefix + '_docker_force_kill', default=omit) }}"- kill_signal: "{{ lookup('vars', _var_prefix + '_docker_kill_signal', default=omit) }}"- name: "{{ lookup('vars', _var_prefix + '_docker_container', default=_var_prefix) }}"+ force_kill: "{{ lookup('docker_var', '_docker_force_kill', default=omit) }}"+ kill_signal: "{{ lookup('docker_var', '_docker_kill_signal', default=omit) }}"+ name: "{{ lookup('docker_var', '_docker_container', default=_var_prefix) }}" state: stopped- stop_timeout: "{{ lookup('vars', _var_prefix + '_docker_stop_timeout', default='180') }}"+ stop_timeout: "{{ lookup('docker_var', '_docker_stop_timeout', default='180') }}" tls_hostname: localhost comparisons: '*': ignore@@ -29,4 +33,4 @@ if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10- until: stop_docker_result is succeeded+ until: (stop_docker_result is succeeded)
modified
resources/tasks/docker/stop_saltbox_docker_containers.yml
@@ -12,8 +12,8 @@ - name: "Resources | Tasks | Docker | Stop Saltbox Docker Containers | Get Docker service state" ansible.builtin.set_fact:- stop_docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"- stop_docker_containers_docker_controller_service_running: "{{ (services['saltbox_managed_docker_controller.service'] is defined) and (services['saltbox_managed_docker_controller.service']['state'] == 'running') }}"+ stop_docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"+ stop_docker_containers_docker_controller_service_running: "{{ (ansible_facts['services']['saltbox_managed_docker_controller.service'] is defined) and (ansible_facts['services']['saltbox_managed_docker_controller.service']['state'] == 'running') }}" - name: Resources | Tasks | Docker | Stop Saltbox Docker Containers | Run Controller Tasks when: stop_docker_service_running and stop_docker_containers_docker_controller_service_running
modified
resources/tasks/git/clone_git_repo.yml
@@ -15,10 +15,10 @@ - name: Resources | Tasks | Git | Clone Git Repo | Set '_git_repo_*' variables ansible.builtin.set_fact:- _git_repo_url: "{{ lookup('vars', _var_prefix + '_git_repo_url') }}"- _git_repo_dest: "{{ lookup('vars', _var_prefix + '_git_repo_dest') }}"- _git_repo_branch_primary: "{{ lookup('vars', _var_prefix + '_git_repo_branch_primary') }}"- _git_repo_branch_secondary: "{{ lookup('vars', _var_prefix + '_git_repo_branch_secondary', default='master') }}"+ _git_repo_url: "{{ git_repo_url }}"+ _git_repo_dest: "{{ git_repo_dest }}"+ _git_repo_branch_primary: "{{ git_repo_branch_primary }}"+ _git_repo_branch_secondary: "{{ git_repo_branch_secondary | default('main') }}" - name: Resources | Tasks | Git | Clone Git Repo | Check for existing git folder ansible.builtin.stat:@@ -28,7 +28,6 @@ - name: Resources | Tasks | Git | Clone Git Repo | Existing repo block when: _git_repo_dest_stat.stat.exists block:- - name: Resources | Tasks | Git | Clone Git Repo | Get current git branch ansible.builtin.shell: | cd {{ _git_repo_dest }}@@ -38,7 +37,7 @@ - name: Resources | Tasks | Git | Clone Git Repo | Update target branch ansible.builtin.set_fact: _git_repo_branch_primary: "{{ _git_repo_branch_secondary }}"- when: _git_repo_branch_secondary == _git_repo_dest_current_branch | trim+ when: (_git_repo_branch_secondary == (_git_repo_dest_current_branch | trim)) - name: Resources | Tasks | Git | Clone Git Repo | git clone repo '{{ _git_repo_branch_primary }}' ansible.builtin.git:@@ -53,9 +52,8 @@ register: _git_clone_status - name: Resources | Tasks | Git | Clone Git Repo | Tasks when above fails- when: _git_clone_status is failed+ when: (_git_clone_status is failed) block:- - name: Resources | Tasks | Git | Clone Git Repo | git clone repo 'master' if above fails ansible.builtin.git: repo: "{{ _git_repo_url }}"@@ -76,4 +74,4 @@ git remote add origin {{ _git_repo_url }} git fetch origin git reset --hard origin/{{ _git_repo_branch_primary }}- when: _git_clone_master_status 
is failed+ when: (_git_clone_master_status is failed)
modified
resources/tasks/instances/get_info.yml
@@ -7,12 +7,30 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Resources | Tasks | Instances | Get Info | Run Sonarr Tasks- ansible.builtin.include_tasks: sonarr.yml+- name: Resources | Tasks | Instances | Get Info | Run Lidarr Tasks+ ansible.builtin.include_tasks: lidarr.yml vars:- sonarr_name: "{{ item }}"- with_items: "{{ sonarr_instances }}"- when: ('sonarr' in get_info_list)+ lidarr_name: "{{ item }}"+ with_items: "{{ lidarr_instances }}"+ when: ('lidarr' in get_info_list)++- name: Resources | Tasks | Instances | Get Info | Run NZBGet Tasks+ ansible.builtin.include_tasks: nzbget.yml+ when: ('nzbget' in get_info_list)++- name: Resources | Tasks | Instances | Get Info | Run Overseerr Tasks+ ansible.builtin.include_tasks: overseerr.yml+ vars:+ overseerr_name: "{{ item }}"+ with_items: "{{ overseerr_instances }}"+ when: ('overseerr' in get_info_list)++- name: Resources | Tasks | Instances | Get Info | Run Plex Tasks+ ansible.builtin.include_tasks: plex.yml+ vars:+ plex_name: "{{ item }}"+ with_items: "{{ plex_instances }}"+ when: ('plex' in get_info_list) - name: Resources | Tasks | Instances | Get Info | Run Radarr Tasks ansible.builtin.include_tasks: radarr.yml@@ -21,30 +39,13 @@ with_items: "{{ radarr_instances }}" when: ('radarr' in get_info_list) -- name: Resources | Tasks | Instances | Get Info | Run Lidarr Tasks- ansible.builtin.include_tasks: lidarr.yml+- name: Resources | Tasks | Instances | Get Info | Run SABnzbd Tasks+ ansible.builtin.include_tasks: sabnzbd.yml+ when: ('sabnzbd' in get_info_list)++- name: Resources | Tasks | Instances | Get Info | Run Sonarr Tasks+ ansible.builtin.include_tasks: sonarr.yml vars:- lidarr_name: "{{ item }}"- with_items: "{{ lidarr_instances }}"- when: ('lidarr' in get_info_list)--- name: Resources | Tasks | Instances | Get Info | Run Readarr Tasks- ansible.builtin.include_tasks: readarr.yml- vars:- readarr_name: "{{ item }}"- with_items: "{{ 
readarr_instances }}"- when: ('readarr' in get_info_list)--- name: Resources | Tasks | Instances | Get Info | Run Plex Tasks- ansible.builtin.include_tasks: plex.yml- vars:- plex_name: "{{ item }}"- with_items: "{{ plex_instances }}"- when: ('plex' in get_info_list)--- name: Resources | Tasks | Instances | Get Info | Run Overseerr Tasks- ansible.builtin.include_tasks: overseerr.yml- vars:- overseerr_name: "{{ item }}"- with_items: "{{ overseerr_instances }}"- when: ('overseerr' in get_info_list)+ sonarr_name: "{{ item }}"+ with_items: "{{ sonarr_instances }}"+ when: ('sonarr' in get_info_list)
modified
resources/tasks/instances/lidarr.yml
@@ -9,7 +9,7 @@ --- - name: Resources | Tasks | Instances | Get Info | Check if Lidarr exists ansible.builtin.stat:- path: "{{ lidarr_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='lidarr') }}" register: lidarr_paths_config_location_stat - name: Resources | Tasks | Instances | Get Info | Lidarr API Key tasks@@ -17,16 +17,16 @@ block: - name: Resources | Tasks | Instances | Get Info | Fetch Lidarr API Key community.general.xml:- path: "{{ lidarr_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='lidarr') }}" xpath: /Config/ApiKey content: text register: xmlresp - name: Resources | Tasks | Instances | Get Info | Set 'lidarr_info' variable ansible.builtin.set_fact:- lidarr_info: "{{ lidarr_info | default({}) | combine({lidarr_name: {'name': lidarr_name, 'url': lidarr_web_url, 'api_key': xmlresp.matches[0].ApiKey}}) }}"+ lidarr_info: "{{ lidarr_info | default({}) | combine({lidarr_name: {'name': lidarr_name, 'url': lookup('role_var', '_web_url', role='lidarr'), 'api_key': xmlresp.matches[0].ApiKey}}) }}" - name: Resources | Tasks | Instances | Get Info | Set 'lidarr_info' variable ansible.builtin.set_fact:- lidarr_info: "{{ lidarr_info | default({}) | combine({lidarr_name: {'name': lidarr_name, 'url': lidarr_web_url, 'api_key': 'not installed'}}) }}"+ lidarr_info: "{{ lidarr_info | default({}) | combine({lidarr_name: {'name': lidarr_name, 'url': lookup('role_var', '_web_url', role='lidarr'), 'api_key': 'not installed'}}) }}" when: (not lidarr_paths_config_location_stat.stat.exists)
modified
resources/tasks/instances/overseerr.yml
@@ -9,21 +9,21 @@ --- - name: Resources | Tasks | Instances | Get Info | Check if Overseerr exists ansible.builtin.stat:- path: "{{ overseerr_paths_config_location }}"- register: overseerr_paths_config_location_stat+ path: "{{ lookup('role_var', '_paths_config_location', role='overseerr') }}"+ register: overseerr_role_paths_config_location_stat - name: Resources | Tasks | Instances | Get Info | Overseerr API Key tasks- when: overseerr_paths_config_location_stat.stat.exists+ when: overseerr_role_paths_config_location_stat.stat.exists block: - name: Resources | Tasks | Instances | Get Info | Fetch Overseerr API Key ansible.builtin.set_fact:- jsondata: "{{ lookup('ansible.builtin.file', overseerr_paths_config_location) }}"+ jsondata: "{{ lookup('ansible.builtin.file', lookup('role_var', '_paths_config_location', role='overseerr')) | from_json }}" - name: Resources | Tasks | Instances | Get Info | Set 'overseerr_info' variable ansible.builtin.set_fact:- overseerr_info: "{{ overseerr_info | default({}) | combine({overseerr_name: {'name': overseerr_name, 'url': overseerr_web_url, 'api_key': jsondata.main.apiKey}}) }}"+ overseerr_info: "{{ overseerr_info | default({}) | combine({overseerr_name: {'name': overseerr_name, 'url': lookup('role_var', '_web_url', role='overseerr'), 'api_key': jsondata.main.apiKey}}) }}" - name: Resources | Tasks | Instances | Get Info | Set 'overseerr_info' variable ansible.builtin.set_fact:- overseerr_info: "{{ overseerr_info | default({}) | combine({overseerr_name: {'name': overseerr_name, 'url': overseerr_web_url, 'api_key': 'not installed'}}) }}"- when: (not overseerr_paths_config_location_stat.stat.exists)+ overseerr_info: "{{ overseerr_info | default({}) | combine({overseerr_name: {'name': overseerr_name, 'url': lookup('role_var', '_web_url', role='overseerr'), 'api_key': 'not installed'}}) }}"+ when: (not overseerr_role_paths_config_location_stat.stat.exists)
modified
resources/tasks/instances/plex.yml
@@ -18,4 +18,4 @@ - name: Resources | Tasks | Instances | Get Info | Set 'plex_info' variable ansible.builtin.set_fact:- plex_info: "{{ plex_info | default({}) | combine({plex_name: {'name': plex_name, 'url': plex_web_url, 'token': plex_auth_token}}) }}"+ plex_info: "{{ plex_info | default({}) | combine({plex_name: {'name': plex_name, 'url': lookup('role_var', '_web_url', role='plex'), 'token': plex_auth_token}}) }}"
modified
resources/tasks/instances/radarr.yml
@@ -9,24 +9,24 @@ --- - name: Resources | Tasks | Instances | Get Info | Check if Radarr exists ansible.builtin.stat:- path: "{{ radarr_paths_config_location }}"- register: radarr_paths_config_location_stat+ path: "{{ lookup('role_var', '_paths_config_location', role='radarr') }}"+ register: radarr_role_paths_config_location_stat - name: Resources | Tasks | Instances | Get Info | Radarr API Key tasks- when: radarr_paths_config_location_stat.stat.exists+ when: radarr_role_paths_config_location_stat.stat.exists block: - name: Resources | Tasks | Instances | Get Info | Fetch Radarr API Key community.general.xml:- path: "{{ radarr_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='radarr') }}" xpath: /Config/ApiKey content: text register: xmlresp - name: Resources | Tasks | Instances | Get Info | Set 'radarr_info' variable ansible.builtin.set_fact:- radarr_info: "{{ radarr_info | default({}) | combine({radarr_name: {'name': radarr_name, 'url': radarr_web_url, 'api_key': xmlresp.matches[0].ApiKey}}) }}"+ radarr_info: "{{ radarr_info | default({}) | combine({radarr_name: {'name': radarr_name, 'url': lookup('role_var', '_web_url', role='radarr'), 'api_key': xmlresp.matches[0].ApiKey}}) }}" - name: Resources | Tasks | Instances | Get Info | Set 'radarr_info' variable ansible.builtin.set_fact:- radarr_info: "{{ radarr_info | default({}) | combine({radarr_name: {'name': radarr_name, 'url': radarr_web_url, 'api_key': 'not installed'}}) }}"- when: (not radarr_paths_config_location_stat.stat.exists)+ radarr_info: "{{ radarr_info | default({}) | combine({radarr_name: {'name': radarr_name, 'url': lookup('role_var', '_web_url', role='radarr'), 'api_key': 'not installed'}}) }}"+ when: (not radarr_role_paths_config_location_stat.stat.exists)
modified
resources/tasks/instances/sonarr.yml
@@ -9,24 +9,24 @@ --- - name: Resources | Tasks | Instances | Get Info | Check if Sonarr exists ansible.builtin.stat:- path: "{{ sonarr_paths_config_location }}"- register: sonarr_paths_config_location_stat+ path: "{{ lookup('role_var', '_paths_config_location', role='sonarr') }}"+ register: sonarr_role_paths_config_location_stat - name: Resources | Tasks | Instances | Get Info | Sonarr API Key tasks- when: sonarr_paths_config_location_stat.stat.exists+ when: sonarr_role_paths_config_location_stat.stat.exists block: - name: Resources | Tasks | Instances | Get Info | Fetch Sonarr API Key community.general.xml:- path: "{{ sonarr_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='sonarr') }}" xpath: /Config/ApiKey content: text register: xmlresp - name: Resources | Tasks | Instances | Get Info | Set 'sonarr_info' variable ansible.builtin.set_fact:- sonarr_info: "{{ sonarr_info | default({}) | combine({sonarr_name: {'name': sonarr_name, 'url': sonarr_web_url, 'api_key': xmlresp.matches[0].ApiKey}}) }}"+ sonarr_info: "{{ sonarr_info | default({}) | combine({sonarr_name: {'name': sonarr_name, 'url': lookup('role_var', '_web_url', role='sonarr'), 'api_key': xmlresp.matches[0].ApiKey}}) }}" - name: Resources | Tasks | Instances | Get Info | Set 'sonarr_info' variable ansible.builtin.set_fact:- sonarr_info: "{{ sonarr_info | default({}) | combine({sonarr_name: {'name': sonarr_name, 'url': sonarr_web_url, 'api_key': 'not installed'}}) }}"- when: (not sonarr_paths_config_location_stat.stat.exists)+ sonarr_info: "{{ sonarr_info | default({}) | combine({sonarr_name: {'name': sonarr_name, 'url': lookup('role_var', '_web_url', role='sonarr'), 'api_key': 'not installed'}}) }}"+ when: (not sonarr_role_paths_config_location_stat.stat.exists)
modified
resources/tasks/systemd/delete_service.yml
@@ -28,7 +28,7 @@ - name: Resources | Tasks | systemd | Delete Service | get '{{ _service_file }}' state ansible.builtin.set_fact:- service_running: "{{ (services[_service_file] is defined) and (services[_service_file]['state'] == 'running') }}"+ service_running: "{{ (ansible_facts['services'][_service_file] is defined) and (ansible_facts['services'][_service_file]['state'] == 'running') }}" - name: Resources | Tasks | systemd | Delete Service | stop '{{ _service_file }}' ansible.builtin.systemd_service:@@ -50,4 +50,4 @@ - name: Resources | Tasks | systemd | Delete Service | systemd daemon-reload ansible.builtin.systemd_service:- daemon_reload: yes+ daemon_reload: true
modified
resources/tasks/systemd/disable_service.yml
@@ -28,7 +28,7 @@ - name: Resources | Tasks | systemd | Disable Service | get '{{ _service_file }}' state ansible.builtin.set_fact:- service_running: "{{ (services[_service_file] is defined) and (services[_service_file]['state'] == 'running') }}"+ service_running: "{{ (ansible_facts['services'][_service_file] is defined) and (ansible_facts['services'][_service_file]['state'] == 'running') }}" - name: Resources | Tasks | systemd | Disable Service | stop '{{ _service_file }}' ansible.builtin.systemd_service:
modified
resources/tasks/systemd/stop_service.yml
@@ -28,7 +28,7 @@ - name: Resources | Tasks | systemd | Stop Service | get '{{ _service_file }}' state ansible.builtin.set_fact:- service_running: "{{ (services[_service_file] is defined) and (services[_service_file]['state'] == 'running') }}"+ service_running: "{{ (ansible_facts['services'][_service_file] is defined) and (ansible_facts['services'][_service_file]['state'] == 'running') }}" - name: Resources | Tasks | systemd | Stop Service | stop '{{ _service_file }}' ansible.builtin.systemd_service:
modified
resources/tasks/systemd/update_service_credentials.yml
@@ -39,5 +39,5 @@ - name: Resources | Tasks | systemd | Update Service Credentials | systemd daemon-reload ansible.builtin.systemd_service:- daemon_reload: yes- when: credentials_state is changed+ daemon_reload: true+ when: (credentials_state is changed)
modified
roles/arr_db/defaults/main.yml
@@ -13,7 +13,6 @@ arr_db_sonarr_enabled: false arr_db_radarr_enabled: false arr_db_lidarr_enabled: false-arr_db_readarr_enabled: false arr_db_whisparr_enabled: false arr_db_prowlarr_enabled: false arr_db_tautulli_enabled: false@@ -22,8 +21,8 @@ # Variables ################################ -arr_path_logs_db: "{{ lookup('vars', arr_type + '_paths_location') }}/logs.db"-arr_path_main_db: "{{ lookup('vars', arr_type + '_paths_location') }}/{{ arr_type }}.db"+arr_path_logs_db: "{{ lookup('vars', arr_type + '_role_paths_location') }}/logs.db"+arr_path_main_db: "{{ lookup('vars', arr_type + '_role_paths_location') }}/{{ arr_type }}.db" arr_db_files: - "logs.db" - "{{ arr_type }}.db"
modified
roles/arr_db/tasks/arr.yml
@@ -48,7 +48,7 @@ mode: "0775" - name: "{{ arr_type | capitalize }} | Backup databases"- ansible.builtin.shell: "cp '{{ lookup('vars', arr_type + '_paths_location') }}/{{ item }}' /tmp/{{ lookup('vars', arr_type + '_name') }}_backup/"+ ansible.builtin.shell: "cp '{{ lookup('vars', arr_type + '_role_paths_location') }}/{{ item }}' /tmp/{{ lookup('vars', arr_type + '_name') }}_backup/" loop: "{{ arr_db_files }}" - name: "{{ arr_type | capitalize }} | Vacuum '{{ arr_path_main_db | basename }}' database"@@ -74,12 +74,12 @@ rescue: - name: "{{ arr_type | capitalize }} | Delete wal and shm files" ansible.builtin.file:- path: "{{ lookup('vars', arr_type + '_paths_location') }}/{{ item }}"+ path: "{{ lookup('vars', arr_type + '_role_paths_location') }}/{{ item }}" state: absent with_items: "{{ arr_db_temp_files }}" - name: "{{ arr_type | capitalize }} | Restore database backup"- ansible.builtin.shell: "cp -f '/tmp/{{ lookup('vars', arr_type + '_name') }}_backup/{{ item }}' '{{ lookup('vars', arr_type + '_paths_location') }}/{{ item }}'"+ ansible.builtin.shell: "cp -f '/tmp/{{ lookup('vars', arr_type + '_name') }}_backup/{{ item }}' '{{ lookup('vars', arr_type + '_role_paths_location') }}/{{ item }}'" loop: "{{ arr_db_files }}" - name: "{{ arr_type | capitalize }} | Start Docker container"
modified
roles/arr_db/tasks/main.yml
@@ -13,13 +13,13 @@ - "You must enable at least one of the role variables to run this role." - "Use the Saltbox inventory to enable." when: not (arr_db_sonarr_enabled or arr_db_lidarr_enabled- or arr_db_readarr_enabled or arr_db_whisparr_enabled- or arr_db_prowlarr_enabled or arr_db_tautulli_enabled)+ or arr_db_whisparr_enabled or arr_db_prowlarr_enabled+ or arr_db_tautulli_enabled) - name: Install SQLite3 ansible.builtin.apt: name: sqlite3- state: present+ state: latest - name: "Execute Sonarr DB tasks" ansible.builtin.include_tasks: arr.yml@@ -51,16 +51,6 @@ loop_control: loop_var: instance -- name: "Execute Readarr DB tasks"- ansible.builtin.include_tasks: arr.yml- when: arr_db_readarr_enabled- vars:- readarr_name: "{{ instance }}"- arr_type: "readarr"- with_items: "{{ readarr_instances }}"- loop_control:- loop_var: instance- - name: "Execute Whisparr DB tasks" ansible.builtin.include_tasks: arr.yml when: arr_db_whisparr_enabled
modified
roles/arr_db/tasks/tautulli.yml
@@ -9,7 +9,7 @@ --- - name: "Tautulli | Check if '{{ arr_db_tautulli_database }}' exists" ansible.builtin.stat:- path: "{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}"+ path: "{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}" register: arr_db - name: "Tautulli | Fail if database does not exist"@@ -23,7 +23,7 @@ var_prefix: "tautulli" - name: "Tautulli | Check if database passes integrity_check"- ansible.builtin.shell: "sqlite3 '{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}' 'PRAGMA integrity_check(1)'"+ ansible.builtin.shell: "sqlite3 '{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}' 'PRAGMA integrity_check(1)'" register: arr_db_integrity_check failed_when: (arr_db_integrity_check.stdout != 'ok') @@ -38,27 +38,27 @@ mode: "0775" - name: "Tautulli | Backup databases"- ansible.builtin.shell: "cp '{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}' /tmp/{{ tautulli_name }}_backup/"+ ansible.builtin.shell: "cp '{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}' /tmp/{{ tautulli_name }}_backup/" - name: "Tautulli | Vacuum '{{ arr_db_tautulli_database | basename }}' database"- ansible.builtin.shell: "sqlite3 '{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}' 'VACUUM;'"+ ansible.builtin.shell: "sqlite3 '{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}' 'VACUUM;'" register: arr_db_vacuum failed_when: (arr_db_vacuum.rc != 0) - name: "Tautulli | Reindex '{{ arr_db_tautulli_database | basename }}' database"- ansible.builtin.shell: "sqlite3 '{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}' 'REINDEX;'"+ ansible.builtin.shell: "sqlite3 '{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}' 'REINDEX;'" register: arr_db_reindex failed_when: (arr_db_reindex.rc != 0) rescue: - name: "Tautulli | Delete wal and shm files" ansible.builtin.file:- path: "{{ tautulli_paths_location }}/{{ item }}"+ path: "{{ tautulli_role_paths_location }}/{{ item 
}}" state: absent with_items: "{{ arr_db_tautulli_temp_files }}" - name: "Tautulli | Restore database backup"- ansible.builtin.shell: "cp -f '/tmp/{{ tautulli_name }}_backup/{{ arr_db_tautulli_database }}' '{{ tautulli_paths_location }}/{{ arr_db_tautulli_database }}'"+ ansible.builtin.shell: "cp -f '/tmp/{{ tautulli_name }}_backup/{{ arr_db_tautulli_database }}' '{{ tautulli_role_paths_location }}/{{ arr_db_tautulli_database }}'" - name: "Tautulli | Start Docker container" ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/asshama/tasks/subtasks/loop.yml
@@ -22,28 +22,28 @@ owner: "{{ user.name }}" group: "{{ user.name }}" with_items:- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Scanners'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Scanners/Series'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/AniDB'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/Plex'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/OMDB'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TMDB'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/blank'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/_cache/fanart/original'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/episodes'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/fanart/original'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/fanart/vignette'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/graphical'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/posters'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in 
Support/Data/com.plexapp.agents.hama/DataItems/TVDB/seasons'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/seasonswide'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/text'- - '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/FanartTV'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Scanners'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Scanners/Series'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/AniDB'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/Plex'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/OMDB'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TMDB'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/blank'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/_cache/fanart/original'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/episodes'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/fanart/original'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in 
Support/Data/com.plexapp.agents.hama/DataItems/TVDB/fanart/vignette'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/graphical'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/posters'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/seasons'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/seasonswide'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/TVDB/text'+ - '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems/FanartTV' - name: "Get latest Absolute Series Scanner.py" ansible.builtin.get_url: url: https://raw.githubusercontent.com/ZeroQI/Absolute-Series-Scanner/master/Scanners/Series/Absolute%20Series%20Scanner.py- dest: "{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Scanners/Series/Absolute Series Scanner.py"+ dest: "{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Scanners/Series/Absolute Series Scanner.py" mode: "0775" register: x until: "x is not failed"@@ -54,7 +54,7 @@ - name: "Set ASS directory permissions" ansible.builtin.file:- path: '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Scanners'+ path: '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Scanners' state: directory mode: "0775" owner: "{{ user.name }}"@@ -64,7 +64,7 @@ - name: "Clone Hama Bundle" ansible.builtin.git: repo: https://github.com/ZeroQI/Hama.bundle.git- dest: '{{ plex_paths_location }}/Library/Application Support/Plex Media 
Server/Plug-ins/Hama.bundle'+ dest: '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-ins/Hama.bundle' clone: true version: HEAD force: true@@ -73,7 +73,7 @@ - name: "Set HAMA directory permissions" ansible.builtin.file:- path: '{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-ins/Hama.bundle'+ path: '{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-ins/Hama.bundle' state: directory mode: "0775" owner: "{{ user.name }}"
modified
roles/authelia/defaults/main.yml
@@ -17,268 +17,330 @@ # Settings ################################ +# Themes - Sub-section Start # Options are light, dark, grey or auto.-authelia_theme: "auto"--# Options are file or ldap-authelia_authentication_backend: "file"-authelia_authentication_backend_password_reset_disable: "false"-authelia_authentication_backend_password_reset_custom_url: ""-authelia_authentication_backend_refresh_interval: "5m"-authelia_authentication_backend_file_path: "/config/users_database.yml"-authelia_authentication_backend_file_watch: "true"-authelia_authentication_backend_file_password_algorithm: "argon2"-authelia_authentication_backend_file_password_argon2_variant: "argon2id"-authelia_authentication_backend_file_password_argon2_iterations: "3"-authelia_authentication_backend_file_password_argon2_memory: "65536"-authelia_authentication_backend_file_password_argon2_parallelism: "4"-authelia_authentication_backend_file_password_argon2_key_length: "32"-authelia_authentication_backend_file_password_argon2_salt_length: "16"-+authelia_role_theme: "auto"+# Themes - Sub-section End++# Logs - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/miscellaneous/logging/+# [NOGLOBAL] Logrotate configuration variable+authelia_role_log_max_backups: "3"+# [NOGLOBAL] Logrotate configuration variable+authelia_role_log_max_size: "10"+authelia_role_log_level: "info"+authelia_role_log_format: "text"+authelia_role_log_file_path: "/config/authelia.log"+authelia_role_log_keep_stdout: true+# Logs - Sub-section End++# Authentication Backend - Sub-section Start+# [GLOBAL] https://www.authelia.com/configuration/first-factor/introduction/+# [GLOBAL] https://www.authelia.com/configuration/first-factor/file/+# [NOGLOBAL] Options are file or ldap+authelia_role_authentication_backend: "file"+authelia_role_authentication_backend_password_change_disable: false+authelia_role_authentication_backend_password_reset_disable: false+authelia_role_authentication_backend_password_reset_custom_url: 
""+authelia_role_authentication_backend_refresh_interval: "5m"+authelia_role_authentication_backend_file_path: "/config/users_database.yml"+authelia_role_authentication_backend_file_watch: true+authelia_role_authentication_backend_file_password_algorithm: "argon2"+authelia_role_authentication_backend_file_password_argon2_variant: "argon2id"+authelia_role_authentication_backend_file_password_argon2_iterations: "3"+authelia_role_authentication_backend_file_password_argon2_memory: "65536"+authelia_role_authentication_backend_file_password_argon2_parallelism: "4"+authelia_role_authentication_backend_file_password_argon2_key_length: "32"+authelia_role_authentication_backend_file_password_argon2_salt_length: "16"+# Authentication Backend - Sub-section End++# Access Control - Sub-section Start # Setting for default Access Control Policy - recommended options one_factor or two_factor # Reference: https://www.authelia.com/configuration/security/access-control/#one_factor-authelia_access_control_policy: "one_factor"--# Settings for Duo-# Reference: https://www.authelia.com/configuration/second-factor/duo/-authelia_duo_enabled: false-authelia_duo_hostname: ""-authelia_duo_integration_key: ""-authelia_duo_secret_key: ""-authelia_duo_self_enrollment: "true"--# Settings for Webauthn-# Reference: https://www.authelia.com/configuration/second-factor/webauthn/-authelia_webauthn_disable: false-authelia_webauthn_display_name: "Authelia"-authelia_webauthn_attestation_conveyance_preference: "indirect"-authelia_webauthn_user_verification: "preferred"-authelia_webauthn_timeout: "60s"--# Settings for Notifier-# Reference: https://www.authelia.com/configuration/notifications/introduction/-# Options are filesystem or smtp. 
Options specific to smtp prefixed with smtp-authelia_notifier: "filesystem"-authelia_notifier_disable_startup_check: "false"-authelia_notifier_smtp_host: ""-authelia_notifier_smtp_port: ""-authelia_notifier_smtp_timeout: ""-authelia_notifier_smtp_username: ""-authelia_notifier_smtp_password: ""-authelia_notifier_smtp_sender: ""-authelia_notifier_smtp_identifier: ""-authelia_notifier_smtp_subject: ""-authelia_notifier_smtp_startup_check_address: ""-authelia_notifier_smtp_disable_require_tls: ""-authelia_notifier_smtp_disable_html_emails: ""-authelia_notifier_smtp_tls_server_name: ""-authelia_notifier_smtp_tls_skip_verify: ""-authelia_notifier_smtp_tls_minimum_version: ""--# Settings for Authelia's server-# Reference: https://www.authelia.com/configuration/miscellaneous/server/-# https://www.authelia.com/c/server#buffer-sizes-authelia_server_address: "0.0.0.0:9091"-authelia_server_buffers_read: "10485760"-authelia_server_buffers_write: "10485760"-authelia_server_enable_pprof: "false"-authelia_server_enable_expvars: "false"-authelia_server_disable_healthcheck: "false"-authelia_server_headers_csp_template: "default-src 'self' *.{{ user.domain }} {{ user.domain }}; script-src 'self' *.{{ user.domain }} {{ user.domain }}; script-src-elem 'self' *.{{ user.domain }} {{ user.domain }}; script-src-attr 'self' *.{{ user.domain }} {{ user.domain }}; style-src 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; style-src-elem 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; style-src-attr 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; img-src 'self' *.{{ user.domain }} {{ user.domain }}; font-src 'self' *.{{ user.domain }} {{ user.domain }}; connect-src 'self' *.{{ user.domain }} {{ user.domain }}; media-src 'self' *.{{ user.domain }} {{ user.domain }}; object-src 'self' *.{{ user.domain }} {{ user.domain }}; child-src 'self' *.{{ user.domain }} {{ user.domain }}; frame-src 'self' *.{{ user.domain }} {{ user.domain }}; worker-src 'self' 
*.{{ user.domain }} {{ user.domain }}; frame-ancestors 'self' *.{{ user.domain }} {{ user.domain }}; form-action 'self' *.{{ user.domain }} {{ user.domain }}; base-uri 'self'"--# Settings for Logging-# Reference: https://www.authelia.com/configuration/miscellaneous/logging/-authelia_log_level: "info"-authelia_log_format: "text"-authelia_log_file_path: "/config/authelia.log"-authelia_log_keep_stdout: "true"--# JWT-authelia_jwt_secret: "{{ lookup('password', '/dev/null', chars=['ascii_letters', 'digits'], length=32) }}"--# TOTP-authelia_totp_issuer: "authelia.com"-authelia_totp_period: "30"-authelia_totp_skew: "1"-authelia_totp_digits: "6"-authelia_totp_secret_size: "32"--# Default redirection-authelia_default_redirection_url: ""--# Default 2FA Method-authelia_default_2fa_method: ""--# NTP-authelia_ntp_address: "time.cloudflare.com:123"-authelia_ntp_version: "3"-authelia_ntp_max_desync: "3s"-authelia_ntp_disable_startup_check: "false"-authelia_ntp_disable_failure: "false"--# Password Policy-authelia_password_policy_standard_enabled: "false"-authelia_password_policy_standard_min_length: "8"-authelia_password_policy_standard_max_length: "0"-authelia_password_policy_standard_require_uppercase: "true"-authelia_password_policy_standard_require_lowercase: "true"-authelia_password_policy_standard_require_number: "true"-authelia_password_policy_standard_require_special: "true"-authelia_password_policy_zxcvbn_enabled: "false"-authelia_password_policy_zxcvbn_min_score: "3"--# Access Control-authelia_access_control_whitelist_host: false-authelia_access_control_default_policy: "deny"-authelia_access_control_rules:+authelia_role_access_control_policy: "one_factor"+# Whitelists the host IPv4/IPv6 addresses depending on which are enabled+authelia_role_access_control_whitelist_host: false+# Whitelists the saltbox Docker network IP subnet+authelia_role_access_control_whitelist_docker: false+# Skip docs+authelia_role_access_control_default_policy: "deny"+# Skip 
docs+authelia_role_access_control_rules: - domain: - "{{ '*.' + user.domain | lower }}" - "{{ user.domain | lower }}"- policy: "{{ authelia_access_control_policy }}"--authelia_access_control_whitelist_rules_lookup: "{{ authelia_access_control_whitelist_rules if authelia_access_control_whitelist_host and ((dns.ipv4 | default(true)) or (dns.ipv6 | default(false))) else [] }}"--authelia_access_control_whitelist_rules:+ policy: "{{ lookup('role_var', '_access_control_policy', role='authelia') }}"++# Skip docs+authelia_role_access_control_whitelist_rules_lookup: "{{ lookup('role_var', '_access_control_whitelist_rules_docker_lookup', role='authelia')+ + lookup('role_var', '_access_control_whitelist_rules_host_lookup', role='authelia') }}"+++# Skip docs+authelia_role_access_control_whitelist_rules_docker_lookup: "{{ lookup('role_var', '_access_control_whitelist_rules_docker', role='authelia')+ if lookup('role_var', '_access_control_whitelist_docker', role='authelia')+ else [] }}"++# Skip docs+authelia_role_access_control_whitelist_rules_host_lookup: "{{ lookup('role_var', '_access_control_whitelist_rules_host', role='authelia')+ if lookup('role_var', '_access_control_whitelist_host', role='authelia') and (dns_ipv4_enabled or dns_ipv6_enabled)+ else [] }}"++# Skip docs+authelia_role_access_control_whitelist_rules_host: - domain: - "{{ '*.' + user.domain }}" - "{{ user.domain }}" policy: bypass- networks: "{{ authelia_access_control_whitelist_networks | unique | reject('equalto', omit) | list }}"--authelia_access_control_whitelist_networks:- - "{{ (ip_address_public + '/32') if (dns.ipv4 | default(true)) else omit }}"- - "{{ (ipv6_address_public + '/128') if (dns.ipv6 | default(false)) else omit }}"+ networks: "{{ lookup('role_var', '_access_control_whitelist_networks_host', role='authelia') | unique | reject('equalto', omit) | list }}"++# Skip docs+authelia_role_access_control_whitelist_rules_docker:+ - domain:+ - "{{ '*.' 
+ user.domain }}"+ - "{{ user.domain }}"+ policy: bypass+ networks: "{{ lookup('role_var', '_access_control_whitelist_networks_docker', role='authelia') | unique | reject('equalto', omit) | list }}"++# Skip docs+authelia_role_access_control_whitelist_networks_docker:+ - "172.19.0.0/16"++# Skip docs+authelia_role_access_control_whitelist_networks_host:+ - "{{ (ip_address_public + '/32') if dns_ipv4_enabled else omit }}"+ - "{{ (ipv6_address_public + '/128') if dns_ipv6_enabled else omit }}"+# Access Control - Sub-section End++# Second Factor - Sub-section Start+authelia_role_default_2fa_method: ""+# Second Factor - Sub-section End++# Second Factor - Duo - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/second-factor/duo/+authelia_role_duo_enabled: false+authelia_role_duo_hostname: ""+authelia_role_duo_integration_key: ""+authelia_role_duo_secret_key: ""+authelia_role_duo_self_enrollment: true+# Second Factor - Duo - Sub-section End++# Second Factor - Webauthn - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/second-factor/webauthn/+authelia_role_webauthn_disable: false+authelia_role_webauthn_enable_passkey_login: false+authelia_role_webauthn_display_name: "Authelia"+authelia_role_webauthn_attestation_conveyance_preference: "indirect"+authelia_role_webauthn_timeout: "60s"+authelia_role_webauthn_filtering_prohibit_backup_eligibility: false+authelia_role_webauthn_filtering_permitted_aaguids: []+authelia_role_webauthn_filtering_prohibited_aaguids: []+authelia_role_webauthn_selection_criteria_attachment: "cross-platform"+authelia_role_webauthn_selection_criteria_discoverability: "discouraged"+authelia_role_webauthn_selection_criteria_user_verification: "preferred"+authelia_role_webauthn_metadata_enabled: false+authelia_role_webauthn_metadata_cache_policy: "strict"+authelia_role_webauthn_metadata_validate_trust_anchor: true+authelia_role_webauthn_metadata_validate_entry: 
true+authelia_role_webauthn_metadata_validate_entry_permit_zero_aaguid: false+authelia_role_webauthn_metadata_validate_status: true+authelia_role_webauthn_metadata_validate_status_permitted: []+authelia_role_webauthn_metadata_validate_status_prohibited: []+# Second Factor - Webauthn - Sub-section End++# Notifier - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/notifications/introduction/+# Options are filesystem or smtp. Options specific to smtp prefixed with smtp+authelia_role_notifier: "filesystem"+authelia_role_notifier_disable_startup_check: false+authelia_role_notifier_smtp_host: ""+authelia_role_notifier_smtp_port: ""+authelia_role_notifier_smtp_timeout: ""+authelia_role_notifier_smtp_username: ""+authelia_role_notifier_smtp_password: ""+authelia_role_notifier_smtp_sender: ""+authelia_role_notifier_smtp_identifier: ""+authelia_role_notifier_smtp_subject: ""+authelia_role_notifier_smtp_startup_check_address: ""+authelia_role_notifier_smtp_disable_require_tls: ""+authelia_role_notifier_smtp_disable_html_emails: ""+authelia_role_notifier_smtp_tls_server_name: ""+authelia_role_notifier_smtp_tls_skip_verify: ""+authelia_role_notifier_smtp_tls_minimum_version: ""+# Notifier - Sub-section End++# Server - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/miscellaneous/server/+authelia_role_server_address: "0.0.0.0:9091"+authelia_role_server_asset_path: ""+authelia_role_server_disable_healthcheck: false+authelia_role_server_buffers_read: "10485760"+authelia_role_server_buffers_write: "10485760"+authelia_role_server_timeouts_read: "6s"+authelia_role_server_timeouts_write: "6s"+authelia_role_server_timeouts_idle: "30s"+authelia_role_server_endpoints_enable_pprof: false+authelia_role_server_endpoints_enable_expvars: false+authelia_role_server_headers_csp_template: "default-src 'self' *.{{ user.domain }} {{ user.domain }}; script-src 'self' *.{{ user.domain }} {{ user.domain }}; script-src-elem 'self' *.{{ 
user.domain }} {{ user.domain }}; script-src-attr 'self' *.{{ user.domain }} {{ user.domain }}; style-src 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; style-src-elem 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; style-src-attr 'self' *.{{ user.domain }} {{ user.domain }} 'nonce-${NONCE}'; img-src 'self' *.{{ user.domain }} {{ user.domain }}; font-src 'self' *.{{ user.domain }} {{ user.domain }}; connect-src 'self' *.{{ user.domain }} {{ user.domain }}; media-src 'self' *.{{ user.domain }} {{ user.domain }}; object-src 'self' *.{{ user.domain }} {{ user.domain }}; child-src 'self' *.{{ user.domain }} {{ user.domain }}; frame-src 'self' *.{{ user.domain }} {{ user.domain }}; worker-src 'self' *.{{ user.domain }} {{ user.domain }}; frame-ancestors 'self' *.{{ user.domain }} {{ user.domain }}; form-action 'self' *.{{ user.domain }} {{ user.domain }}; base-uri 'self'"+# Server - Sub-section End++# Metrics - Sub-section Start+# [GLOBAL] Reference: https://www.authelia.com/configuration/telemetry/metrics/+authelia_role_telemetry_metrics_enabled: false+authelia_role_telemetry_metrics_address: "tcp://0.0.0.0:9959"+authelia_role_telemetry_metrics_buffers_read: "4096"+authelia_role_telemetry_metrics_buffers_write: "4096"+authelia_role_telemetry_metrics_timeouts_read: "6s"+authelia_role_telemetry_metrics_timeouts_write: "6s"+authelia_role_telemetry_metrics_timeouts_idle: "30s"+# Metrics - Sub-section End++# Identity Validation - Sub-section Start+authelia_role_identity_validation_reset_password_jwt_lifespan: "5m"+authelia_role_identity_validation_reset_password_jwt_algorithm: "HS256"+authelia_role_identity_validation_elevated_session_code_lifespan: "5m"+authelia_role_identity_validation_elevated_session_elevation_lifespan: "10m"+authelia_role_identity_validation_elevated_session_characters: "8"+authelia_role_identity_validation_elevated_session_require_second_factor: false+authelia_role_identity_validation_elevated_session_skip_second_factor: 
false+# Identity Validation - Sub-section End++# JWT - Sub-section Start+authelia_role_jwt_secret: "{{ lookup('password', '/dev/null', chars=['ascii_letters', 'digits'], length=32) }}"+# JWT - Sub-section End++# TOTP - Sub-section Start+authelia_role_totp_disable: false+authelia_role_totp_issuer: "{{ lookup('role_var', '_web_subdomain', role='authelia') + '.' + lookup('role_var', '_web_domain', role='authelia') }}"+authelia_role_totp_algorithm: "SHA1"+authelia_role_totp_digits: "6"+authelia_role_totp_period: "30"+authelia_role_totp_skew: "1"+authelia_role_totp_secret_size: "32"+authelia_role_totp_allowed_algorithms: ["SHA1"]+authelia_role_totp_allowed_digits: ["6"]+authelia_role_totp_allowed_periods: ["30"]+authelia_role_totp_disable_reuse_security_policy: false+# TOTP - Sub-section End++# Session - Sub-section Start+authelia_role_default_redirection_url: ""+# Session - Sub-section End++# NTP - Sub-section Start+authelia_role_ntp_address: "time.cloudflare.com:123"+authelia_role_ntp_version: "3"+authelia_role_ntp_max_desync: "3s"+authelia_role_ntp_disable_startup_check: false+authelia_role_ntp_disable_failure: false+# NTP - Sub-section End++# Password Policy - Sub-section Start+authelia_role_password_policy_standard_enabled: false+authelia_role_password_policy_standard_min_length: "8"+authelia_role_password_policy_standard_max_length: "0"+authelia_role_password_policy_standard_require_uppercase: true+authelia_role_password_policy_standard_require_lowercase: true+authelia_role_password_policy_standard_require_number: true+authelia_role_password_policy_standard_require_special: true+authelia_role_password_policy_zxcvbn_enabled: false+authelia_role_password_policy_zxcvbn_min_score: "3"+# Password Policy - Sub-section End++# Skip docs+authelia_role_response_headers:+ - "Remote-User"+ - "Remote-Groups"+ - "Remote-Name"+ - "Remote-Email" ################################ # Paths ################################ -authelia_paths_folder: "{{ authelia_name 
}}"-authelia_paths_location: "{{ server_appdata_path }}/{{ authelia_paths_folder }}"-authelia_paths_folders_list:- - "{{ authelia_paths_location }}"+authelia_role_paths_folder: "{{ authelia_name }}"+authelia_role_paths_location: "{{ server_appdata_path }}/{{ authelia_role_paths_folder }}"+authelia_role_paths_folders_list:+ - "{{ authelia_role_paths_location }}" ################################ # Web ################################ -authelia_web_subdomain: "{{ authelia.subdomain }}"-authelia_web_domain: "{{ user.domain }}"-authelia_web_port: "9091"-authelia_web_url: "{{ 'https://' + (authelia_web_subdomain + '.' + authelia_web_domain- if (authelia_web_subdomain | length > 0)- else authelia_web_domain) }}"+authelia_role_web_subdomain: "{{ authelia.subdomain }}"+authelia_role_web_domain: "{{ user.domain }}"+authelia_role_web_port: "9091"+authelia_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='authelia') + '.' + lookup('role_var', '_web_domain', role='authelia')+ if (lookup('role_var', '_web_subdomain', role='authelia') | length > 0)+ else lookup('role_var', '_web_domain', role='authelia')) }}" ################################ # DNS ################################ -authelia_dns_record: "{{ authelia_web_subdomain }}"-authelia_dns_zone: "{{ authelia_web_domain }}"-authelia_dns_proxy: "{{ dns.proxied }}"+authelia_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='authelia') }}"+authelia_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='authelia') }}"+authelia_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -authelia_traefik_sso_middleware: ""-authelia_traefik_middleware_default: "{{ traefik_default_middleware }}"-authelia_traefik_middleware_custom: ""-authelia_traefik_certresolver: "{{ traefik_default_certresolver }}"-authelia_traefik_enabled: true-authelia_traefik_api_enabled: false-authelia_traefik_api_endpoint: ""+authelia_role_traefik_sso_middleware: 
""+authelia_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+authelia_role_traefik_middleware_custom: ""+authelia_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+authelia_role_traefik_enabled: true+authelia_role_traefik_api_enabled: false+authelia_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-authelia_docker_container: "{{ authelia_name }}"+authelia_role_docker_container: "{{ authelia_name }}" # Image-authelia_docker_image_pull: true-authelia_docker_image_tag: "4.38"-authelia_docker_image: "authelia/authelia:{{ authelia_docker_image_tag }}"--# Ports-authelia_docker_ports_defaults: []-authelia_docker_ports_custom: []-authelia_docker_ports: "{{ authelia_docker_ports_defaults- + authelia_docker_ports_custom }}"+authelia_role_docker_image_pull: true+authelia_role_docker_image_repo: "authelia/authelia"+authelia_role_docker_image_tag: "4.39"+authelia_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='authelia') }}:{{ lookup('role_var', '_docker_image_tag', role='authelia') }}" # Envs-authelia_docker_envs_default:+authelia_role_docker_envs_default: TZ: "{{ tz }}" PUID: "{{ uid }}" PGID: "{{ gid }}"-authelia_docker_envs_custom: {}-authelia_docker_envs: "{{ authelia_docker_envs_default- | combine(authelia_docker_envs_custom) }}"--# Commands-authelia_docker_commands_default: []-authelia_docker_commands_custom: []-authelia_docker_commands: "{{ authelia_docker_commands_default- + authelia_docker_commands_custom }}"+authelia_role_docker_envs_custom: {}+authelia_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='authelia')+ | combine(lookup('role_var', '_docker_envs_custom', role='authelia')) }}" # Volumes-authelia_docker_volumes_default:- - "{{ authelia_paths_location }}:/config"-authelia_docker_volumes_custom: []-authelia_docker_volumes: "{{ authelia_docker_volumes_default- + authelia_docker_volumes_custom }}"--# 
Devices-authelia_docker_devices_default: []-authelia_docker_devices_custom: []-authelia_docker_devices: "{{ authelia_docker_devices_default- + authelia_docker_devices_custom }}"--# Hosts-authelia_docker_hosts_default: {}-authelia_docker_hosts_custom: {}-authelia_docker_hosts: "{{ docker_hosts_common- | combine(authelia_docker_hosts_default)- | combine(authelia_docker_hosts_custom) }}"--# Labels-authelia_docker_labels_default: {}-authelia_docker_labels_custom: {}-authelia_docker_labels: "{{ docker_labels_common- | combine(authelia_docker_labels_default)- | combine(authelia_docker_labels_custom) }}"+authelia_role_docker_volumes_default:+ - "{{ authelia_role_paths_location }}:/config"+authelia_role_docker_volumes_custom: []+authelia_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='authelia')+ + lookup('role_var', '_docker_volumes_custom', role='authelia') }}" # Hostname-authelia_docker_hostname: "{{ authelia_name }}"+authelia_role_docker_hostname: "{{ authelia_name }}" # Networks-authelia_docker_networks_alias: "{{ authelia_name }}"-authelia_docker_networks_default: []-authelia_docker_networks_custom: []-authelia_docker_networks: "{{ docker_networks_common- + authelia_docker_networks_default- + authelia_docker_networks_custom }}"--# Capabilities-authelia_docker_capabilities_default: []-authelia_docker_capabilities_custom: []-authelia_docker_capabilities: "{{ authelia_docker_capabilities_default- + authelia_docker_capabilities_custom }}"--# Security Opts-authelia_docker_security_opts_default: []-authelia_docker_security_opts_custom: []-authelia_docker_security_opts: "{{ authelia_docker_security_opts_default- + authelia_docker_security_opts_custom }}"+authelia_role_docker_networks_alias: "{{ authelia_name }}"+authelia_role_docker_networks_default: []+authelia_role_docker_networks_custom: []+authelia_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='authelia')+ + lookup('role_var', 
'_docker_networks_custom', role='authelia') }}" # Restart Policy-authelia_docker_restart_policy: unless-stopped+authelia_role_docker_restart_policy: unless-stopped # State-authelia_docker_state: started+authelia_role_docker_state: started # Dependencies-authelia_depends_on: "{{ 'authelia-redis,lldap' if (authelia_authentication_backend == 'ldap') else 'authelia-redis' }}"-authelia_depends_on_delay: "0"-authelia_depends_on_healthchecks: "{{ 'true' if (authelia_authentication_backend == 'ldap') else 'false' }}"+authelia_role_depends_on: "{{ 'authelia-redis,lldap' if (lookup('role_var', '_authentication_backend', role='authelia') == 'ldap') else 'authelia-redis' }}"+authelia_role_depends_on_delay: "0"+authelia_role_depends_on_healthchecks: "{{ 'true' if (lookup('role_var', '_authentication_backend', role='authelia') == 'ldap') else 'false' }}"
modified
roles/authelia/tasks/main.yml
@@ -14,15 +14,15 @@ - name: "Fail if invalid Authentication Backend" ansible.builtin.fail:- msg: "authelia_authentication_backend is not valid. Use 'file' or 'ldap'."- when: not authelia_authentication_backend in ["file", "ldap"]+ msg: "{{ authelia_name }}_authentication_backend or {{ authelia_name }}_role_authentication_backend is not valid. Use 'file' or 'ldap'."+ when: not lookup('role_var', '_authentication_backend') in ["file", "ldap"] - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" - name: Reset Authelia directory ansible.builtin.file:- path: "{{ authelia_paths_location }}"+ path: "{{ authelia_role_paths_location }}" state: absent when: ('authelia-reset' in ansible_run_tags) @@ -31,29 +31,29 @@ - name: Check if 'configuration.yml' exists ansible.builtin.stat:- path: "{{ authelia_paths_location }}/configuration.yml"+ path: "{{ authelia_role_paths_location }}/configuration.yml" register: authelia_config_stat - name: Load authentication_backend value ansible.builtin.shell: |- yyq '.authentication_backend' {{ authelia_paths_location }}/configuration.yml+ yyq '.authentication_backend' {{ authelia_role_paths_location }}/configuration.yml register: authentication_backend_key when: authelia_config_stat.stat.exists and (not 'authelia-reset' in ansible_run_tags) - name: Fail if Configuration was generated with incorrect authentication_backend ansible.builtin.fail: msg:- - "You seem to have changed 'authelia_authentication_backend' but the old configuration still exists."+ - "You seem to have changed '{{ authelia_name }}_authentication_backend' or '{{ authelia_name }}_role_authentication_backend' but the old configuration still exists." - "Backup the Authelia opt folder if you want to keep it and then run the 'authelia-reset' tag." - "Port any modifications made to your configurations.yml from the backup that you want to keep." 
when: - (not 'authelia-reset' in ansible_run_tags) - authelia_config_stat.stat.exists- - ((('ldap' in authentication_backend_key.stdout) and (authelia_authentication_backend == 'file'))- or (('file' in authentication_backend_key.stdout) and (authelia_authentication_backend == 'ldap')))+ - ((('ldap' in authentication_backend_key.stdout) and (lookup('role_var', '_authentication_backend') == 'file'))+ or (('file' in authentication_backend_key.stdout) and (lookup('role_var', '_authentication_backend') == 'ldap'))) - name: Authentication Backend Task- ansible.builtin.import_tasks: "subtasks/auth_backend.yml"+ ansible.builtin.include_tasks: "subtasks/auth_backend.yml" - name: Remove legacy Redis Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -65,7 +65,7 @@ - name: Cleanup legacy Redis directory ansible.builtin.file:- path: "/opt/authelia_redis"+ path: "{{ server_appdata_path }}/authelia_redis" state: absent - name: "Import Redis Role"@@ -77,13 +77,39 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Settings Task- ansible.builtin.import_tasks: "subtasks/settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" when: (authelia_config_stat.stat.exists)++- name: Logrotate | Create 'authelia_logrotate' variable+ ansible.builtin.set_fact:+ authelia_logrotate:+ - path: "{{ authelia_name }}"+ content: |+ {{ server_appdata_path }}/{{ authelia_name }}/authelia.log {+ su {{ user.name }} {{ user.name }}+ rotate {{ lookup('role_var', '_log_max_backups') }}+ size {{ lookup('role_var', '_log_max_size') }}+ 
missingok+ notifempty+ postrotate+ docker kill --signal="HUP" {{ lookup('role_var', '_docker_container') }}+ endscript+ create 600 {{ user.name }} {{ user.name }}+ }++- name: Logrotate | Add items to '/etc/logrotate.d'+ ansible.builtin.blockinfile:+ path: "/etc/logrotate.d/{{ item.path }}"+ marker: "### SALTBOX MANAGED BLOCK - {mark} ###"+ block: "{{ item.content }}"+ create: true+ mode: "0644"+ loop: "{{ authelia_logrotate }}" - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"
modified
roles/authelia/tasks/subtasks/auth_backend.yml
@@ -8,9 +8,9 @@ ########################################################################## --- - name: Auth | Import File Backend Task- ansible.builtin.import_tasks: "subtasks/file_backend.yml"- when: (authelia_authentication_backend == 'file')+ ansible.builtin.include_tasks: "subtasks/file_backend.yml"+ when: (lookup('role_var', '_authentication_backend', role='authelia') == 'file') - name: Auth | Import LDAP Backend Task- ansible.builtin.import_tasks: "subtasks/ldap_backend.yml"- when: (authelia_authentication_backend == 'ldap')+ ansible.builtin.include_tasks: "subtasks/ldap_backend.yml"+ when: (lookup('role_var', '_authentication_backend', role='authelia') == 'ldap')
modified
roles/authelia/tasks/subtasks/file_backend.yml
@@ -9,38 +9,39 @@ --- - name: File | Check if 'users_database.yml' exists ansible.builtin.stat:- path: "{{ authelia_paths_location }}/users_database.yml"+ path: "{{ authelia_role_paths_location }}/users_database.yml" register: authelia_user_config_stat - name: File | Import default 'configuration.yml' ansible.builtin.template: src: configuration.yml.j2- dest: "{{ authelia_paths_location }}/configuration.yml"+ dest: "{{ authelia_role_paths_location }}/configuration.yml" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" when: (not authelia_config_stat.stat.exists) - name: File | Format 'configuration.yml'- ansible.builtin.shell: "yyq -i {{ authelia_paths_location }}/configuration.yml"+ ansible.builtin.shell: "yyq -i {{ authelia_role_paths_location }}/configuration.yml" when: (not authelia_config_stat.stat.exists) - name: File | Generate Password Hash community.docker.docker_container: name: authelia-password-hash- image: "{{ authelia_docker_image }}"+ image: "{{ lookup('role_var', '_docker_image') }}" command: "authelia crypto hash generate --config /config/configuration.yml --password {{ user.pass | quote }}"- detach: no+ detach: false volumes:- - "{{ authelia_paths_location }}:/config"+ - "{{ authelia_role_paths_location }}:/config" networks: - name: saltbox- cleanup: yes+ cleanup: true state: started container_default_behavior: compatibility tls_hostname: localhost pull: true register: authelia_password+ no_log: true when: (not authelia_user_config_stat.stat.exists) - name: File | Sanitize Password Hash@@ -51,7 +52,7 @@ - name: File | Import default 'users_database.yml' ansible.builtin.template: src: users_database.yml.j2- dest: "{{ authelia_paths_location }}/users_database.yml"+ dest: "{{ authelia_role_paths_location }}/users_database.yml" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/authelia/tasks/subtasks/ldap_backend.yml
@@ -8,26 +8,21 @@ ########################################################################## --- - name: LDAP | Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ authelia_web_domain | default(user.domain) }}\", as_object=True); print(res.tld)"- register: authelia_tld--- name: LDAP | Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ authelia_web_domain | default(user.domain) }}\", as_object=True); print(res.domain)"- register: authelia_domain+ tld_parse:+ url: "http://{{ lookup('role_var', '_web_domain', role='authelia') }}"+ register: authelia_tld_parse - name: LDAP | Import default 'configuration.yml' ansible.builtin.template: src: configuration.yml.j2- dest: "{{ authelia_paths_location }}/configuration.yml"+ dest: "{{ authelia_role_paths_location }}/configuration.yml" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" when: (not authelia_config_stat.stat.exists) - name: LDAP | Format 'configuration.yml'- ansible.builtin.shell: "yyq -i {{ authelia_paths_location }}/configuration.yml"+ ansible.builtin.shell: "yyq -i {{ authelia_role_paths_location }}/configuration.yml" when: (not authelia_config_stat.stat.exists) - name: LDAP | Import lldap Role
modified
roles/authelia/tasks/subtasks/settings.yml
@@ -9,24 +9,24 @@ --- - name: Settings | Load encryption_key value ansible.builtin.shell: |- yyq '.storage.encryption_key' {{ authelia_paths_location }}/configuration.yml+ yyq '.storage.encryption_key' {{ authelia_role_paths_location }}/configuration.yml register: authelia_encryption_key - name: Settings | Add encryption_key to config file ansible.builtin.shell: |- yyq -i '.storage.encryption_key = "{{ lookup('password', '/dev/null', chars=['ascii_letters', 'digits'], length=64) }}"' {{ authelia_paths_location }}/configuration.yml+ yyq -i '.storage.encryption_key = "{{ lookup('password', '/dev/null', chars=['ascii_letters', 'digits'], length=64) }}"' {{ authelia_role_paths_location }}/configuration.yml become: true become_user: "{{ user.name }}" when: authelia_encryption_key.stdout == "null" - name: Settings | Load session.redis.host value ansible.builtin.shell: |- yyq '.session.redis.host' {{ authelia_paths_location }}/configuration.yml+ yyq '.session.redis.host' {{ authelia_role_paths_location }}/configuration.yml register: authelia_session_redis_host - name: Settings | Change session.redis.host value to 'authelia_redis' ansible.builtin.shell: |- yyq -i '.session.redis.host = "authelia-redis"' {{ authelia_paths_location }}/configuration.yml+ yyq -i '.session.redis.host = "authelia-redis"' {{ authelia_role_paths_location }}/configuration.yml become: true become_user: "{{ user.name }}" when: (authelia_session_redis_host.stdout == "redis") or (authelia_session_redis_host.stdout == "authelia_redis")
modified
roles/authelia/templates/configuration.yml.j2
@@ -3,91 +3,147 @@ # Authelia configuration # ############################################################### +# Options are light, dark, grey or auto+theme: {{ lookup('role_var', '_theme', role='authelia') }}++default_2fa_method: "{{ lookup('role_var', '_default_2fa_method', role='authelia') }}"++server:+ address: {{ lookup('role_var', '_server_address', role='authelia') }}+{% if lookup('role_var', '_server_asset_path', role='authelia') | length > 0 %}+ asset_path: {{ lookup('role_var', '_server_asset_path', role='authelia') }}+{% endif %}+ disable_healthcheck: {{ lookup('role_var', '_server_disable_healthcheck', role='authelia') | bool | lower }}+ buffers:+ read: {{ lookup('role_var', '_server_buffers_read', role='authelia') }}+ write: {{ lookup('role_var', '_server_buffers_write', role='authelia') }}+ timeouts:+ read: {{ lookup('role_var', '_server_timeouts_read', role='authelia') }}+ write: {{ lookup('role_var', '_server_timeouts_write', role='authelia') }}+ idle: {{ lookup('role_var', '_server_timeouts_idle', role='authelia') }}+ endpoints:+ enable_pprof: {{ lookup('role_var', '_server_endpoints_enable_pprof', role='authelia') }}+ enable_expvars: {{ lookup('role_var', '_server_endpoints_enable_expvars', role='authelia') }}+ headers:+ csp_template: "{{ lookup('role_var', '_server_headers_csp_template', role='authelia') }}"++log:+ level: {{ lookup('role_var', '_log_level', role='authelia') }}+ format: {{ lookup('role_var', '_log_format', role='authelia') }}+ file_path: {{ lookup('role_var', '_log_file_path', role='authelia') }}+ keep_stdout: {{ lookup('role_var', '_log_keep_stdout', role='authelia') | bool | lower }}++telemetry:+ metrics:+ enabled: {{ lookup('role_var', '_telemetry_metrics_enabled', role='authelia') | bool | lower }}+{% if lookup('role_var', '_telemetry_metrics_enabled', role='authelia') | bool %}+ address: {{ lookup('role_var', '_telemetry_metrics_address', role='authelia') }}+ buffers:+ read: {{ lookup('role_var', 
'_telemetry_metrics_buffers_read', role='authelia') }}+ write: {{ lookup('role_var', '_telemetry_metrics_buffers_write', role='authelia') }}+ timeouts:+ read: {{ lookup('role_var', '_telemetry_metrics_timeouts_read', role='authelia') }}+ write: {{ lookup('role_var', '_telemetry_metrics_timeouts_write', role='authelia') }}+ idle: {{ lookup('role_var', '_telemetry_metrics_timeouts_idle', role='authelia') }}+{% endif %}++totp:+ disable: {{ lookup('role_var', '_totp_disable', role='authelia') | bool | lower }}+ issuer: {{ lookup('role_var', '_totp_issuer', role='authelia') }}+ algorithm: {{ lookup('role_var', '_totp_algorithm', role='authelia') }}+ digits: {{ lookup('role_var', '_totp_digits', role='authelia') }}+ period: {{ lookup('role_var', '_totp_period', role='authelia') }}+ skew: {{ lookup('role_var', '_totp_skew', role='authelia') }}+ secret_size: {{ lookup('role_var', '_totp_secret_size', role='authelia') }}+ allowed_algorithms: {{ lookup('role_var', '_totp_allowed_algorithms', role='authelia') }}+ allowed_digits: {{ lookup('role_var', '_totp_allowed_digits', role='authelia') }}+ allowed_periods: {{ lookup('role_var', '_totp_allowed_periods', role='authelia') }}+ disable_reuse_security_policy: {{ lookup('role_var', '_totp_disable_reuse_security_policy', role='authelia') | bool | lower }}++webauthn:+ disable: {{ lookup('role_var', '_webauthn_disable', role='authelia') | bool | lower }}+ enable_passkey_login: {{ lookup('role_var', '_webauthn_enable_passkey_login', role='authelia') | bool | lower }}+ display_name: {{ lookup('role_var', '_webauthn_display_name', role='authelia') }}+ attestation_conveyance_preference: {{ lookup('role_var', '_webauthn_attestation_conveyance_preference', role='authelia') }}+ timeout: {{ lookup('role_var', '_webauthn_timeout', role='authelia') }}+ filtering:+ prohibit_backup_eligibility: {{ lookup('role_var', '_webauthn_filtering_prohibit_backup_eligibility', role='authelia') | bool | lower }}+{% if lookup('role_var', 
'_webauthn_filtering_permitted_aaguids', role='authelia') | length > 0 %}+ permitted_aaguids: {{ lookup('role_var', '_webauthn_filtering_permitted_aaguids', role='authelia') }}+{% endif %}+{% if lookup('role_var', '_webauthn_filtering_prohibited_aaguids', role='authelia') | length > 0 %}+ prohibited_aaguids: {{ lookup('role_var', '_webauthn_filtering_prohibited_aaguids', role='authelia') }}+{% endif %}+ selection_criteria:+ attachment: {{ lookup('role_var', '_webauthn_selection_criteria_attachment', role='authelia') }}+ discoverability: {{ lookup('role_var', '_webauthn_selection_criteria_discoverability', role='authelia') }}+ user_verification: {{ lookup('role_var', '_webauthn_selection_criteria_user_verification', role='authelia') }}+ metadata:+ enabled: {{ lookup('role_var', '_webauthn_metadata_enabled', role='authelia') | bool | lower }}+{% if lookup('role_var', '_webauthn_metadata_enabled', role='authelia') | bool %}+ cache_policy: {{ lookup('role_var', '_webauthn_metadata_cache_policy', role='authelia') }}+ validate_trust_anchor: {{ lookup('role_var', '_webauthn_metadata_validate_trust_anchor', role='authelia') }}+ validate_entry: {{ lookup('role_var', '_webauthn_metadata_validate_entry', role='authelia') }}+ validate_entry_permit_zero_aaguid: {{ lookup('role_var', '_webauthn_metadata_validate_entry_permit_zero_aaguid', role='authelia') }}+ validate_status: {{ lookup('role_var', '_webauthn_metadata_validate_status', role='authelia') }}+{% if lookup('role_var', '_webauthn_metadata_validate_status_permitted', role='authelia') | length > 0 %}+ validate_status_permitted: {{ lookup('role_var', '_webauthn_metadata_validate_status_permitted', role='authelia') }}+{% endif %}+{% if lookup('role_var', '_webauthn_metadata_validate_status_prohibited', role='authelia') | length > 0 %}+ validate_status_prohibited: {{ lookup('role_var', '_webauthn_metadata_validate_status_prohibited', role='authelia') }}+{% endif %}+{% endif %}++{% if lookup('role_var', '_duo_enabled', 
role='authelia') | bool %}+duo_api:+ disable: false+ hostname: {{ lookup('role_var', '_duo_hostname', role='authelia') }}+ integration_key: {{ lookup('role_var', '_duo_integration_key', role='authelia') }}+ secret_key: {{ lookup('role_var', '_duo_secret_key', role='authelia') }}+ enable_self_enrollment: {{ lookup('role_var', '_duo_self_enrollment', role='authelia') | bool | lower }}+{% endif %}+ identity_validation: reset_password:- jwt_secret: {{ authelia_jwt_secret }}--# Options are light, dark, grey or auto-theme: {{ authelia_theme }}--{% if authelia_default_redirection_url | length > 0 %}-default_redirection_url: {{ authelia_default_redirection_url }}--{% endif %}-default_2fa_method: "{{ authelia_default_2fa_method }}"--server:- address: {{ authelia_server_address }}- buffers:- read: {{ authelia_server_buffers_read }}- write: {{ authelia_server_buffers_write }}- disable_healthcheck: {{ authelia_server_disable_healthcheck }}- headers:- csp_template: "{{ authelia_server_headers_csp_template }}"- endpoints:- enable_pprof: {{ authelia_server_enable_pprof }}- enable_expvars: {{ authelia_server_enable_expvars }}--log:- level: {{ authelia_log_level }}- format: {{ authelia_log_format }}- file_path: {{ authelia_log_file_path }}- keep_stdout: {{ authelia_log_keep_stdout }}--totp:- issuer: {{ authelia_totp_issuer }}- period: {{ authelia_totp_period }}- skew: {{ authelia_totp_skew }}- digits: {{ authelia_totp_digits }}- secret_size: {{ authelia_totp_secret_size }}--{% if authelia_duo_enabled | bool %}-duo_api:- disable: false- hostname: {{ authelia_duo_hostname }}- integration_key: {{ authelia_duo_integration_key }}- secret_key: {{ authelia_duo_secret_key }}- enable_self_enrollment: {{ authelia_duo_self_enrollment }}-{% else %}-#-# If you want to use Duo Push notifications-#-# duo_api:-# hostname: api-123456789.example.com-# integration_key: ABCDEF-# secret_key: 1234567890abcdefghifjkl-#-# Read more at 
https://www.authelia.com/docs/configuration/duo-push-notifications.html-#-{% endif %}--webauthn:- disable: {{ authelia_webauthn_disable }}- display_name: {{ authelia_webauthn_display_name }}- attestation_conveyance_preference: {{ authelia_webauthn_attestation_conveyance_preference }}- user_verification: {{ authelia_webauthn_user_verification }}- timeout: {{ authelia_webauthn_timeout }}+ jwt_lifespan: {{ lookup('role_var', '_identity_validation_reset_password_jwt_lifespan', role='authelia') }}+ jwt_algorithm: {{ lookup('role_var', '_identity_validation_reset_password_jwt_algorithm', role='authelia') }}+ jwt_secret: {{ lookup('role_var', '_jwt_secret', role='authelia') }}+ elevated_session:+ code_lifespan: {{ lookup('role_var', '_identity_validation_elevated_session_code_lifespan', role='authelia') }}+ elevation_lifespan: {{ lookup('role_var', '_identity_validation_elevated_session_elevation_lifespan', role='authelia') }}+ characters: {{ lookup('role_var', '_identity_validation_elevated_session_characters', role='authelia') }}+ require_second_factor: {{ lookup('role_var', '_identity_validation_elevated_session_require_second_factor', role='authelia') | bool | lower }}+ skip_second_factor: {{ lookup('role_var', '_identity_validation_elevated_session_skip_second_factor', role='authelia') | bool | lower }}++ntp:+ address: {{ lookup('role_var', '_ntp_address', role='authelia') }}+ version: {{ lookup('role_var', '_ntp_version', role='authelia') }}+ max_desync: {{ lookup('role_var', '_ntp_max_desync', role='authelia') }}+ disable_startup_check: {{ lookup('role_var', '_ntp_disable_startup_check', role='authelia') | bool | lower }}+ disable_failure: {{ lookup('role_var', '_ntp_disable_failure', role='authelia') | bool | lower }} authentication_backend:+ password_change:+ disable: {{ lookup('role_var', '_authentication_backend_password_change_disable', role='authelia') | bool | lower }} password_reset:- disable: {{ authelia_authentication_backend_password_reset_disable }}- 
custom_url: "{{ authelia_authentication_backend_password_reset_custom_url }}"- refresh_interval: {{ authelia_authentication_backend_refresh_interval }}-{% if authelia_authentication_backend == 'file' %}+ disable: {{ lookup('role_var', '_authentication_backend_password_reset_disable', role='authelia') | bool | lower }}+ custom_url: "{{ lookup('role_var', '_authentication_backend_password_reset_custom_url', role='authelia') }}"+ refresh_interval: {{ lookup('role_var', '_authentication_backend_refresh_interval', role='authelia') }}+{% if lookup('role_var', '_authentication_backend', role='authelia') == 'file' %} file:- path: {{ authelia_authentication_backend_file_path }}- watch: {{ authelia_authentication_backend_file_watch }}+ path: {{ lookup('role_var', '_authentication_backend_file_path', role='authelia') }}+ watch: {{ lookup('role_var', '_authentication_backend_file_watch', role='authelia') | bool | lower }} password:- algorithm: {{ authelia_authentication_backend_file_password_algorithm }}+ algorithm: {{ lookup('role_var', '_authentication_backend_file_password_algorithm', role='authelia') }} argon2:- variant: {{ authelia_authentication_backend_file_password_argon2_variant }}- iterations: {{ authelia_authentication_backend_file_password_argon2_iterations }}- memory: {{ authelia_authentication_backend_file_password_argon2_memory }}- parallelism: {{ authelia_authentication_backend_file_password_argon2_parallelism }}- key_length: {{ authelia_authentication_backend_file_password_argon2_key_length }}- salt_length: {{ authelia_authentication_backend_file_password_argon2_salt_length }}-{% endif %}-{% if authelia_authentication_backend == 'ldap' %}+ variant: {{ lookup('role_var', '_authentication_backend_file_password_argon2_variant', role='authelia') }}+ iterations: {{ lookup('role_var', '_authentication_backend_file_password_argon2_iterations', role='authelia') }}+ memory: {{ lookup('role_var', '_authentication_backend_file_password_argon2_memory', role='authelia') 
}}+ parallelism: {{ lookup('role_var', '_authentication_backend_file_password_argon2_parallelism', role='authelia') }}+ key_length: {{ lookup('role_var', '_authentication_backend_file_password_argon2_key_length', role='authelia') }}+ salt_length: {{ lookup('role_var', '_authentication_backend_file_password_argon2_salt_length', role='authelia') }}+{% endif %}+{% if lookup('role_var', '_authentication_backend', role='authelia') == 'ldap' %} ldap: implementation: custom address: ldap://lldap:3890@@ -95,44 +151,39 @@ tls: skip_verify: true minimum_version: TLS1.2- base_dn: dc={{ authelia_domain.stdout }},dc={{ authelia_tld.stdout }}+ base_dn: dc={{ authelia_tld_parse.domain }},dc={{ authelia_tld_parse.tld }} additional_users_dn: ou=people users_filter: "(&({username_attribute}={input})(objectClass=person))" additional_groups_dn: ou=groups groups_filter: "(member={dn})"- - user: uid={{ user.name }},ou=people,dc={{ authelia_domain.stdout }},dc={{ authelia_tld.stdout }}+ user: uid={{ user.name }},ou=people,dc={{ authelia_tld_parse.domain }},dc={{ authelia_tld_parse.tld }} password: {{ user.pass }} attributes: group_name: cn display_name: displayName mail: mail- {% endif %} password_policy: standard:- enabled: {{ authelia_password_policy_standard_enabled }}- min_length: {{ authelia_password_policy_standard_min_length }}- max_length: {{ authelia_password_policy_standard_max_length }}- require_uppercase: {{ authelia_password_policy_standard_require_uppercase }}- require_lowercase: {{ authelia_password_policy_standard_require_lowercase }}- require_number: {{ authelia_password_policy_standard_require_number }}- require_special: {{ authelia_password_policy_standard_require_special }}+ enabled: {{ lookup('role_var', '_password_policy_standard_enabled', role='authelia') | bool | lower }}+ min_length: {{ lookup('role_var', '_password_policy_standard_min_length', role='authelia') }}+ max_length: {{ lookup('role_var', '_password_policy_standard_max_length', role='authelia') }}+ 
require_uppercase: {{ lookup('role_var', '_password_policy_standard_require_uppercase', role='authelia') }}+ require_lowercase: {{ lookup('role_var', '_password_policy_standard_require_lowercase', role='authelia') }}+ require_number: {{ lookup('role_var', '_password_policy_standard_require_number', role='authelia') }}+ require_special: {{ lookup('role_var', '_password_policy_standard_require_special', role='authelia') }} zxcvbn:- enabled: {{ authelia_password_policy_zxcvbn_enabled }}- min_score: {{ authelia_password_policy_zxcvbn_min_score }}--#-# Read more at https://www.authelia.com/docs/configuration/access-control.html-#+ enabled: {{ lookup('role_var', '_password_policy_zxcvbn_enabled', role='authelia') | bool | lower }}+ min_score: {{ lookup('role_var', '_password_policy_zxcvbn_min_score', role='authelia') }}+ access_control:- default_policy: {{ authelia_access_control_default_policy }}+ default_policy: {{ lookup('role_var', '_access_control_default_policy', role='authelia') }} rules:- {{ (authelia_access_control_whitelist_rules_lookup + authelia_access_control_rules) | to_nice_yaml | indent(4) }}+ {{ (lookup('role_var', '_access_control_whitelist_rules_lookup', role='authelia') + lookup('role_var', '_access_control_rules', role='authelia')) | to_nice_yaml | indent(4) }} session:- name: {{ authelia_web_subdomain + '.' + authelia_web_domain | lower }}+ name: {{ (lookup('role_var', '_web_subdomain', role='authelia') + '.' 
+ lookup('role_var', '_web_domain', role='authelia')) | lower }} secret: {{ lookup('password', '/dev/null', chars=['ascii_letters', 'digits'], length=32) }} expiration: 1h inactivity: 5m@@ -142,13 +193,16 @@ host: authelia-redis port: 6379 cookies:- - domain: {{ authelia_web_domain | lower }}- authelia_url: {{ authelia_web_url | lower }}- name: authelia_{{ authelia_web_domain | lower }}+ - domain: {{ lookup('role_var', '_web_domain', role='authelia') | lower }}+ authelia_url: {{ lookup('role_var', '_web_url', role='authelia') | lower }}+ name: authelia_{{ lookup('role_var', '_web_domain', role='authelia') | lower }} same_site: lax inactivity: 5m expiration: 1h remember_me: 1M+{% if lookup('role_var', '_default_redirection_url', role='authelia') | length > 0 %}+ default_redirection_url: {{ lookup('role_var', '_default_redirection_url', role='authelia') }}+{% endif %} regulation: max_retries: 3@@ -160,73 +214,29 @@ local: path: /config/db.sqlite3 -ntp:- address: "{{ authelia_ntp_address }}"- version: {{ authelia_ntp_version }}- max_desync: {{ authelia_ntp_max_desync }}- disable_startup_check: {{ authelia_ntp_disable_startup_check }}- disable_failure: {{ authelia_ntp_disable_failure }}- notifier:- disable_startup_check: {{ authelia_notifier_disable_startup_check }}-{% if authelia_notifier == 'filesystem' %}+ disable_startup_check: {{ lookup('role_var', '_notifier_disable_startup_check', role='authelia') | bool | lower }}+{% if lookup('role_var', '_notifier', role='authelia') == 'filesystem' %} filesystem: filename: /config/notification.txt {% endif %}-{% if authelia_notifier == 'smtp' %}+{% if lookup('role_var', '_notifier', role='authelia') == 'smtp' %} smtp:- host: {{ authelia_notifier_smtp_host }}- port: {{ authelia_notifier_smtp_port }}- timeout: {{ (authelia_notifier_smtp_timeout) | default(omit) }}- username: {{ (authelia_notifier_smtp_username) | default(omit) }}- password: {{ (authelia_notifier_smtp_password) | default(omit) }}- sender: {{ 
authelia_notifier_smtp_sender }}- identifier: {{ (authelia_notifier_smtp_identifier) | default(omit) }}- subject: "{{ (authelia_notifier_smtp_subject) | default(omit) }}"- startup_check_address: {{ (authelia_notifier_smtp_startup_check_address) | default(omit) }}- disable_require_tls: {{ (authelia_notifier_smtp_disable_require_tls) | default(omit) }}- disable_html_emails: {{ (authelia_notifier_smtp_disable_html_emails) | default(omit) }}+ host: {{ lookup('role_var', '_notifier_smtp_host', role='authelia') }}+ port: {{ lookup('role_var', '_notifier_smtp_port', role='authelia') }}+ timeout: {{ (lookup('role_var', '_notifier_smtp_timeout', role='authelia')) | default(omit) }}+ username: {{ (lookup('role_var', '_notifier_smtp_username', role='authelia')) | default(omit) }}+ password: {{ (lookup('role_var', '_notifier_smtp_password', role='authelia')) | default(omit) }}+ sender: {{ lookup('role_var', '_notifier_smtp_sender', role='authelia') }}+ identifier: {{ (lookup('role_var', '_notifier_smtp_identifier', role='authelia')) | default(omit) }}+ subject: "{{ (lookup('role_var', '_notifier_smtp_subject', role='authelia')) | default(omit) }}"+ startup_check_address: {{ (lookup('role_var', '_notifier_smtp_startup_check_address', role='authelia')) | default(omit) }}+ disable_require_tls: {{ (lookup('role_var', '_notifier_smtp_disable_require_tls', role='authelia')) | default(omit) }}+ disable_html_emails: {{ (lookup('role_var', '_notifier_smtp_disable_html_emails', role='authelia')) | default(omit) }} tls:- server_name: {{ (authelia_notifier_smtp_tls_server_name) | default(omit) }}- skip_verify: {{ (authelia_notifier_smtp_tls_skip_verify) | default(omit) }}- minimum_version: {{ (authelia_notifier_smtp_tls_minimum_version) | default(omit) }}-{% else %}-#-# If you want to use email here is a template (Replace the above entry as you can only have one notifier configured)-#-# notifier:-# disable_startup_check: false-# smtp:-# host: 127.0.0.1-# port: 1025-# timeout: 5s-# 
username: test-# password: password-# sender: admin@example.com-# identifier: localhost-# subject: "[Authelia] {title}"-# startup_check_address: test@authelia.com-# disable_require_tls: false-# disable_html_emails: false-# tls:-# server_name: smtp.example.com-# skip_verify: false-# minimum_version: TLS1.2-#-# Read more at https://www.authelia.com/docs/configuration/notifier/smtp.html-#-{% endif %}--telemetry:- metrics:- enabled: false- address: "tcp://0.0.0.0:9959"- buffers:- read: 4096- write: 4096- timeouts:- read: 6s- write: 6s- idle: 30s+ server_name: {{ (lookup('role_var', '_notifier_smtp_tls_server_name', role='authelia')) | default(omit) }}+ skip_verify: {{ (lookup('role_var', '_notifier_smtp_tls_skip_verify', role='authelia')) | default(omit) }}+ minimum_version: {{ (lookup('role_var', '_notifier_smtp_tls_minimum_version', role='authelia')) | default(omit) }}+{% endif %} ...
modified
roles/authentik/defaults/main.yml
@@ -17,145 +17,160 @@ # Settings ################################ -authentik_email_host: "localhost"-authentik_email_port: "25"-authentik_email_username: ""-authentik_email_password: ""-authentik_email_tls: "false"-authentik_email_ssl: "false"-authentik_email_timeout: "10"-authentik_email_from: "authentik@localhost"-authentik_access_token_validity: "24" # Hours, only fresh installs use this+authentik_role_email_host: "localhost"+authentik_role_email_port: "25"+authentik_role_email_username: ""+authentik_role_email_password: ""+authentik_role_email_tls: "false"+authentik_role_email_ssl: "false"+authentik_role_email_timeout: "10"+authentik_role_email_from: "authentik@localhost"+authentik_role_access_token_validity: "24" # Hours, only fresh installs use this++################################+# Legacy+################################++# Do not edit or override using the inventory+authentik_role_redis_name: "{{ authentik_name }}-redis" ################################ # Postgres ################################ -authentik_postgres_name: "{{ authentik_name }}-postgres"-authentik_postgres_docker_env_db: "authentik"-authentik_postgres_docker_image_tag: "16-alpine"-authentik_postgres_docker_image_repo: "postgres"+# Authentik will always require postgres, this just allows you to skip the one Saltbox deploys+authentik_role_postgres_deploy: true+authentik_role_postgres_name: "{{ authentik_name }}-postgres"+authentik_role_postgres_user: "{{ postgres_role_docker_env_user }}"+authentik_role_postgres_password: "{{ postgres_role_docker_env_password }}"+authentik_role_postgres_docker_env_db: "authentik"+authentik_role_postgres_docker_image_tag: "16-alpine"+authentik_role_postgres_docker_image_repo: "postgres"+authentik_role_postgres_docker_healthcheck:+ test: ["CMD-SHELL", "pg_isready -d {{ lookup('role_var', '_postgres_docker_env_db', role='authentik') }} -U {{ postgres_role_docker_env_user }}"]+ start_period: 20s+ interval: 30s+ retries: 5+ timeout: 5s 
################################ # Paths ################################ -authentik_paths_folder: "{{ authentik_name }}"-authentik_paths_location: "{{ server_appdata_path }}/{{ authentik_paths_folder }}"-authentik_paths_folders_list:- - "{{ authentik_paths_location }}"- - "{{ authentik_paths_location }}/media"- - "{{ authentik_paths_location }}/custom-templates"- - "{{ authentik_paths_location }}/certs"+authentik_role_paths_folder: "{{ authentik_name }}"+authentik_role_paths_location: "{{ server_appdata_path }}/{{ authentik_role_paths_folder }}"+authentik_role_paths_folders_list:+ - "{{ authentik_role_paths_location }}"+ - "{{ authentik_role_paths_location }}/media"+ - "{{ authentik_role_paths_location }}/custom-templates"+ - "{{ authentik_role_paths_location }}/certs" ################################ # Web ################################ -authentik_web_subdomain: "auth"-authentik_web_domain: "{{ user.domain }}"-authentik_web_port: "9000"-authentik_web_url: "{{ 'https://' + (authentik_web_subdomain + '.' + authentik_web_domain- if (authentik_web_subdomain | length > 0)- else authentik_web_domain) }}"+authentik_role_web_subdomain: "auth"+authentik_role_web_domain: "{{ user.domain }}"+authentik_role_web_port: "9000"+authentik_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='authentik') + '.' + lookup('role_var', '_web_domain', role='authentik')+ if (lookup('role_var', '_web_subdomain', role='authentik') | length > 0)+ else lookup('role_var', '_web_domain', role='authentik')) }}"+authentik_role_web_host: "{{ (lookup('role_var', '_web_subdomain', role='authentik') + '.' 
+ lookup('role_var', '_web_domain', role='authentik')+ if (lookup('role_var', '_web_subdomain', role='authentik') | length > 0)+ else lookup('role_var', '_web_domain', role='authentik')) }}" ################################ # DNS ################################ -authentik_dns_record: "{{ authentik_web_subdomain }}"-authentik_dns_zone: "{{ authentik_web_domain }}"-authentik_dns_proxy: "{{ dns.proxied }}"+authentik_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='authentik') }}"+authentik_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='authentik') }}"+authentik_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -authentik_traefik_sso_middleware: ""-authentik_traefik_middleware_default: "{{ traefik_default_middleware }}"-authentik_traefik_middleware_custom: ""-authentik_traefik_certresolver: "{{ traefik_default_certresolver }}"-authentik_traefik_enabled: true-authentik_traefik_api_enabled: false-authentik_traefik_api_endpoint: ""+authentik_role_traefik_sso_middleware: ""+authentik_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+authentik_role_traefik_middleware_custom: ""+authentik_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+authentik_role_traefik_enabled: true+authentik_role_traefik_api_enabled: false+authentik_role_traefik_api_endpoint: ""+authentik_role_traefik_outpost_catch_all: false ################################ # Setup ################################ -authentik_host: "http://{{ authentik_name }}:9000"-authentik_default_user: "akadmin"+authentik_role_host: "http://{{ authentik_name }}:{{ lookup('role_var', '_web_port', role='authentik') }}"+authentik_role_default_user: "akadmin"+authentik_role_response_headers:+ - "X-authentik-username"+ - "X-authentik-groups"+ - "X-authentik-entitlements"+ - "X-authentik-email"+ - "X-authentik-name"+ - "X-authentik-uid"+ - "X-authentik-jwt"+ - "X-authentik-meta-jwks"+ - "X-authentik-meta-outpost"+ 
- "X-authentik-meta-provider"+ - "X-authentik-meta-app"+ - "X-authentik-meta-version" ################################ # Docker ################################ # Container-authentik_docker_container: "{{ authentik_name }}"+authentik_role_docker_container: "{{ authentik_name }}" # Image-authentik_docker_image_pull: true-authentik_docker_image_tag: "2025.8"-authentik_docker_image: "ghcr.io/goauthentik/server:{{ authentik_docker_image_tag }}"--# Ports-authentik_docker_ports_defaults: []-authentik_docker_ports_custom: []-authentik_docker_ports: "{{ authentik_docker_ports_defaults- + authentik_docker_ports_custom }}"+authentik_role_docker_image_pull: true+authentik_role_docker_image_repo: "ghcr.io/goauthentik/server"+authentik_role_docker_image_tag: "2025.10"+authentik_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='authentik') }}:{{ lookup('role_var', '_docker_image_tag', role='authentik') }}" # Envs-authentik_docker_envs_default:- AUTHENTIK_REDIS__HOST: authentik-redis- AUTHENTIK_POSTGRESQL__HOST: "{{ authentik_postgres_name }}"- AUTHENTIK_POSTGRESQL__USER: "{{ postgres_docker_env_user }}"- AUTHENTIK_POSTGRESQL__NAME: "{{ authentik_postgres_docker_env_db }}"- AUTHENTIK_POSTGRESQL__PASSWORD: "{{ postgres_docker_env_password }}"+authentik_role_docker_envs_default:+ AUTHENTIK_POSTGRESQL__HOST: "{{ lookup('role_var', '_postgres_name', role='authentik') }}"+ AUTHENTIK_POSTGRESQL__USER: "{{ lookup('role_var', '_postgres_user', role='authentik') }}"+ AUTHENTIK_POSTGRESQL__NAME: "{{ lookup('role_var', '_postgres_docker_env_db', role='authentik') }}"+ AUTHENTIK_POSTGRESQL__PASSWORD: "{{ lookup('role_var', '_postgres_password', role='authentik') }}" AUTHENTIK_SECRET_KEY: "{{ authentik_saltbox_facts.facts.secret_key }}" AUTHENTIK_BOOTSTRAP_TOKEN: "{{ omit if authentik_data_folder.stat.exists else authentik_bootstrap_token }}"- AUTHENTIK_EMAIL__HOST: "{{ authentik_email_host }}"- AUTHENTIK_EMAIL__PORT: "{{ authentik_email_port }}"- 
AUTHENTIK_EMAIL__USERNAME: "{{ authentik_email_username }}"- AUTHENTIK_EMAIL__PASSWORD: "{{ authentik_email_password }}"- AUTHENTIK_EMAIL__USE_TLS: "{{ authentik_email_tls }}"- AUTHENTIK_EMAIL__USE_SSL: "{{ authentik_email_ssl }}"- AUTHENTIK_EMAIL__TIMEOUT: "{{ authentik_email_timeout }}"- AUTHENTIK_EMAIL__FROM: "{{ authentik_email_from }}"+ AUTHENTIK_EMAIL__HOST: "{{ lookup('role_var', '_email_host', role='authentik') }}"+ AUTHENTIK_EMAIL__PORT: "{{ lookup('role_var', '_email_port', role='authentik') }}"+ AUTHENTIK_EMAIL__USERNAME: "{{ lookup('role_var', '_email_username', role='authentik') }}"+ AUTHENTIK_EMAIL__PASSWORD: "{{ lookup('role_var', '_email_password', role='authentik') }}"+ AUTHENTIK_EMAIL__USE_TLS: "{{ lookup('role_var', '_email_tls', role='authentik') }}"+ AUTHENTIK_EMAIL__USE_SSL: "{{ lookup('role_var', '_email_ssl', role='authentik') }}"+ AUTHENTIK_EMAIL__TIMEOUT: "{{ lookup('role_var', '_email_timeout', role='authentik') }}"+ AUTHENTIK_EMAIL__FROM: "{{ lookup('role_var', '_email_from', role='authentik') }}" AUTHENTIK_LISTEN__TRUSTED_PROXY_CIDRS: "172.19.0.0/16"-authentik_docker_envs_custom: {}-authentik_docker_envs: "{{ authentik_docker_envs_default- | combine(authentik_docker_envs_custom) }}"+authentik_role_docker_envs_custom: {}+authentik_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='authentik')+ | combine(lookup('role_var', '_docker_envs_custom', role='authentik')) }}" # Commands-authentik_docker_commands_default:+authentik_role_docker_commands_default: - "server"-authentik_docker_commands_custom: []-authentik_docker_commands: "{{ authentik_docker_commands_default- + authentik_docker_commands_custom }}"+authentik_role_docker_commands_custom: []+authentik_role_docker_commands: "{{ lookup('role_var', '_docker_commands_default', role='authentik')+ + lookup('role_var', '_docker_commands_custom', role='authentik') }}" # Volumes-authentik_docker_volumes_default:- - "{{ authentik_paths_location }}/media:/media"- - "{{ 
authentik_paths_location }}/custom-templates:/templates"-authentik_docker_volumes_custom: []-authentik_docker_volumes: "{{ authentik_docker_volumes_default- + authentik_docker_volumes_custom }}"--# Devices-authentik_docker_devices_default: []-authentik_docker_devices_custom: []-authentik_docker_devices: "{{ authentik_docker_devices_default- + authentik_docker_devices_custom }}"--# Hosts-authentik_docker_hosts_default: {}-authentik_docker_hosts_custom: {}-authentik_docker_hosts: "{{ docker_hosts_common- | combine(authentik_docker_hosts_default)- | combine(authentik_docker_hosts_custom) }}"+authentik_role_docker_volumes_default:+ - "{{ authentik_role_paths_location }}/media:/media"+ - "{{ authentik_role_paths_location }}/custom-templates:/templates"+authentik_role_docker_volumes_custom: []+authentik_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='authentik')+ + lookup('role_var', '_docker_volumes_custom', role='authentik') }}" # Labels-authentik_docker_labels_default:+authentik_role_docker_labels_default: traefik.http.routers.authentik-outpost-http.entrypoints: "web" traefik.http.routers.authentik-outpost-http.service: "authentik-outpost-http" traefik.http.routers.authentik-outpost-http.rule: "PathPrefix(`/outpost.goauthentik.io/`)"@@ -165,46 +180,33 @@ traefik.http.routers.authentik-outpost.service: "authentik-outpost" traefik.http.routers.authentik-outpost.rule: "PathPrefix(`/outpost.goauthentik.io/`)" traefik.http.routers.authentik-outpost.tls.options: "securetls@file"- traefik.http.routers.authentik-outpost.tls.certresolver: "{{ authentik_traefik_certresolver }}"+ traefik.http.routers.authentik-outpost.tls.certresolver: "{{ authentik_role_traefik_certresolver }}" traefik.http.routers.authentik-outpost.middlewares: "{{ traefik_default_middleware }}" traefik.http.routers.authentik-outpost.priority: "99"- traefik.http.services.authentik-outpost-http.loadbalancer.server.port: "9000"- 
traefik.http.services.authentik-outpost.loadbalancer.server.port: "9000"-authentik_docker_labels_custom: {}-authentik_docker_labels: "{{ docker_labels_common- | combine(authentik_docker_labels_default)- | combine(authentik_docker_labels_custom) }}"+ traefik.http.services.authentik-outpost-http.loadbalancer.server.port: "{{ lookup('role_var', '_web_port', role='authentik') }}"+ traefik.http.services.authentik-outpost.loadbalancer.server.port: "{{ lookup('role_var', '_web_port', role='authentik') }}"+authentik_role_docker_labels_custom: {}+authentik_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='authentik')+ | combine(lookup('role_var', '_docker_labels_custom', role='authentik')) }}" # Hostname-authentik_docker_hostname: "{{ authentik_name }}"+authentik_role_docker_hostname: "{{ authentik_name }}" # Networks-authentik_docker_networks_alias: "{{ authentik_name }}"-authentik_docker_networks_default: []-authentik_docker_networks_custom: []-authentik_docker_networks: "{{ docker_networks_common- + authentik_docker_networks_default- + authentik_docker_networks_custom }}"--# Capabilities-authentik_docker_capabilities_default: []-authentik_docker_capabilities_custom: []-authentik_docker_capabilities: "{{ authentik_docker_capabilities_default- + authentik_docker_capabilities_custom }}"--# Security Opts-authentik_docker_security_opts_default: []-authentik_docker_security_opts_custom: []-authentik_docker_security_opts: "{{ authentik_docker_security_opts_default- + authentik_docker_security_opts_custom }}"+authentik_role_docker_networks_alias: "{{ authentik_name }}"+authentik_role_docker_networks_default: []+authentik_role_docker_networks_custom: []+authentik_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='authentik')+ + lookup('role_var', '_docker_networks_custom', role='authentik') }}" # Restart Policy-authentik_docker_restart_policy: unless-stopped+authentik_role_docker_restart_policy: 
unless-stopped # State-authentik_docker_state: started+authentik_role_docker_state: started # Dependencies-authentik_depends_on: "authentik-redis,{{ authentik_postgres_name }}"-authentik_depends_on_delay: "0"-authentik_depends_on_healthchecks: "false"+authentik_role_depends_on: "{{ lookup('role_var', '_postgres_name', role='authentik') }}"+authentik_role_depends_on_delay: "0"+authentik_role_depends_on_healthchecks: "true"
modified
roles/authentik/tasks/main.yml
@@ -7,62 +7,59 @@ # GNU General Public License v3.0 # ######################################################################### ---+- name: Import authentik_worker role variables+ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/variables/import_role_vars.yml"+ vars:+ import_role_name: authentik_worker+ - name: Remove existing Docker containers ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" vars: var_prefix: "{{ item }}" loop: - "{{ authentik_name }}"- - "authentik-worker"- - "authentik-redis"- - "authentik-postgres"+ - "{{ authentik_worker_name }}"+ - "{{ lookup('role_var', '_postgres_name', role='authentik') }}"+ - "{{ lookup('role_var', '_redis_name', role='authentik') }}" # Legacy removal - name: Reset Authentik directory ansible.builtin.file:- path: "{{ authentik_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='authentik') }}" state: absent when: ('authentik-reset' in ansible_run_tags) -- name: Check if '{{ authentik_paths_location }}' exists+- name: Check if '{{ lookup('role_var', '_paths_location', role='authentik') }}' exists ansible.builtin.stat:- path: "{{ authentik_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='authentik') }}" register: authentik_data_folder - name: Generate Bootstrap Token ansible.builtin.set_fact: authentik_bootstrap_token: "{{ lookup('ansible.builtin.password', '/dev/null', chars=['ascii_letters', 'digits'], length=50) }}"+ when: (not authentik_data_folder.stat.exists) - name: Create directories ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml"--- name: "Import Redis Role"- ansible.builtin.include_role:- name: redis- vars:- redis_instances: ["authentik-redis"]- redis_docker_image_tag: "alpine"- redis_paths_folder: "{{ authentik_paths_folder }}/redis"- redis_paths_location: "{{ server_appdata_path }}/{{ redis_paths_folder }}"- redis_docker_commands_default:- - "--save 60 1 --loglevel 
warning" - name: "Import Postgres Role" ansible.builtin.include_role: name: postgres vars:- postgres_instances: ["{{ authentik_postgres_name }}"]- postgres_docker_image_tag: "{{ authentik_postgres_docker_image_tag }}"- postgres_docker_image_repo: "{{ authentik_postgres_docker_image_repo }}"- postgres_paths_folder: "{{ authentik_paths_folder }}/postgres"- postgres_paths_location: "{{ server_appdata_path }}/{{ postgres_paths_folder }}"- postgres_docker_env_db: "{{ authentik_postgres_docker_env_db }}"+ postgres_instances: ["{{ lookup('role_var', '_postgres_name', role='authentik') }}"]+ postgres_role_docker_image_tag: "{{ lookup('role_var', '_postgres_docker_image_tag', role='authentik') }}"+ postgres_role_docker_image_repo: "{{ lookup('role_var', '_postgres_docker_image_repo', role='authentik') }}"+ postgres_role_paths_folder: "{{ lookup('role_var', '_paths_folder', role='authentik') }}/postgres"+ postgres_role_paths_location: "{{ server_appdata_path }}/{{ postgres_role_paths_folder }}"+ postgres_role_docker_env_db: "{{ lookup('role_var', '_postgres_docker_env_db', role='authentik') }}"+ postgres_role_docker_healthcheck: "{{ lookup('role_var', '_postgres_docker_healthcheck', role='authentik') }}"+ when: lookup('role_var', '_postgres_deploy', role='authentik') - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: "Save Authentik Saltbox facts" saltbox_facts:@@ -72,7 +69,16 @@ secret_key: "{{ lookup('ansible.builtin.password', '/dev/null', chars=['ascii_letters', 'digits'], length=50) }}" owner: "{{ user.name }}" group: "{{ user.name }}"+ base_path: "{{ server_appdata_path }}" 
register: authentik_saltbox_facts++- name: "Wait for {{ lookup('role_var', '_postgres_name', role='authentik') }} container to be healthy"+ community.docker.docker_container_info:+ name: "{{ lookup('role_var', '_postgres_name', role='authentik') }}"+ until: "authentik_postgres_container_info.container.State.Health.Status == 'healthy'"+ register: authentik_postgres_container_info+ retries: 15+ delay: 10 - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"@@ -81,10 +87,6 @@ ansible.builtin.include_role: name: authentik_worker -- name: Setup Toggle- ansible.builtin.set_fact:- authentik_setup: "{{ false if authentik_data_folder.stat.exists else true }}"- - name: Setup Task- ansible.builtin.import_tasks: "subtasks/setup.yml"- when: authentik_setup and not continuous_integration+ ansible.builtin.include_tasks: "subtasks/setup.yml"+ when: (not authentik_data_folder.stat.exists) and (not continuous_integration)
modified
roles/authentik/tasks/subtasks/setup.yml
@@ -9,7 +9,7 @@ --- - name: Validate bootstrap token (retry until Authentik is ready) ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/me/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/me/" method: GET headers: Authorization: "Bearer {{ authentik_bootstrap_token }}"@@ -20,7 +20,7 @@ - name: Create new admin user ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/" method: POST body_format: json headers:@@ -37,7 +37,7 @@ - name: Set new admin user's password ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/{{ new_user_response.json.pk }}/set_password/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/{{ new_user_response.json.pk }}/set_password/" method: POST body_format: json headers:@@ -47,10 +47,11 @@ body: password: "{{ user.pass }}" status_code: 204+ no_log: true - name: Create new admin group ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/groups/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/groups/" method: POST body_format: json headers:@@ -67,7 +68,7 @@ - name: Create token for new admin user ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/tokens/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/tokens/" method: POST body_format: json headers:@@ -83,7 +84,7 @@ - name: Get token for new admin user ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/tokens/saltbox-api-token/view_key/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/tokens/saltbox-api-token/view_key/" method: GET body_format: json headers:@@ -104,10 +105,11 @@ api_token: "{{ authentik_admin_token }}" owner: "{{ user.name }}" group: "{{ user.name }}"+ base_path: "{{ server_appdata_path }}" - name: Retrieve flow instances ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/flows/instances/"+ url: 
"{{ lookup('role_var', '_host', role='authentik') }}/api/v3/flows/instances/" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -129,7 +131,7 @@ - name: Create Traefik Forward Auth provider ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/providers/proxy/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/providers/proxy/" method: POST body_format: json headers:@@ -140,16 +142,16 @@ name: "Traefik Forward Auth" authorization_flow: "{{ authentik_authorization_flow_uuid }}" invalidation_flow: "{{ authentik_invalidation_flow_uuid }}"- cookie_domain: "{{ authentik_web_domain }}"- external_host: "{{ authentik_web_url }}"+ cookie_domain: "{{ lookup('role_var', '_web_domain', role='authentik') }}"+ external_host: "{{ lookup('role_var', '_web_url', role='authentik') }}" mode: "forward_domain"- access_token_validity: "hours={{ authentik_access_token_validity }}"+ access_token_validity: "hours={{ lookup('role_var', '_access_token_validity', role='authentik') }}" status_code: 201 register: proxy_providers - name: Create Traefik Forward Auth application ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/applications/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/applications/" method: POST body_format: json headers:@@ -165,7 +167,7 @@ - name: Retrieve Outposts ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/outposts/instances/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/outposts/instances/" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -175,7 +177,7 @@ - name: Edit Embedded Outpost ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/outposts/instances/{{ outposts.json.results[0].pk }}/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/outposts/instances/{{ outposts.json.results[0].pk }}/" method: PATCH body_format: json headers:@@ -186,12 +188,12 @@ providers: - "{{ proxy_providers.json.pk }}" config:- 
authentik_host: "{{ authentik_web_url }}"+ authentik_host: "{{ lookup('role_var', '_web_url', role='authentik') }}" status_code: 200 - name: Get admins group ID ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/groups/?name=admins"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/groups/?name=admins" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -202,7 +204,7 @@ - name: Get all notification rules ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/events/rules/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/events/rules/" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -213,7 +215,7 @@ - name: Update notification rules with admins group ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/events/rules/{{ item.pk }}/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/events/rules/{{ item.pk }}/" method: PATCH body_format: json headers:@@ -233,9 +235,10 @@ - "authentik" - "authentik-worker" -- name: Check if '{{ authentik_paths_location }}' exists+# To ensure bootstrap token is no longer set+- name: Check if '{{ lookup('role_var', '_paths_location', role='authentik') }}' exists ansible.builtin.stat:- path: "{{ authentik_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='authentik') }}" register: authentik_data_folder - name: Create Docker container@@ -247,7 +250,7 @@ - name: Validate Saltbox token (retry until Authentik is ready) ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/me/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/me/" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -258,7 +261,7 @@ - name: Delete authentik-bootstrap-token ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/tokens/authentik-bootstrap-token/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/tokens/authentik-bootstrap-token/" 
method: DELETE headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -267,7 +270,7 @@ - name: Get Users ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/" method: GET headers: Authorization: "Bearer {{ authentik_admin_token }}"@@ -275,15 +278,15 @@ status_code: 200 register: get_users_response -- name: fact+- name: Set fact ansible.builtin.set_fact: akadmin_id: "{{ item.pk }}" loop: "{{ get_users_response.json.results }}"- when: item.username == authentik_default_user+ when: item.username == authentik_role_default_user - name: Disable akadmin user ansible.builtin.uri:- url: "{{ authentik_host }}/api/v3/core/users/{{ akadmin_id }}/"+ url: "{{ lookup('role_var', '_host', role='authentik') }}/api/v3/core/users/{{ akadmin_id }}/" method: PATCH body_format: json headers:
modified
roles/authentik_worker/defaults/main.yml
@@ -18,95 +18,58 @@ ################################ # Container-authentik_worker_docker_container: "{{ authentik_worker_name }}"+authentik_worker_role_docker_container: "{{ authentik_worker_name }}" # Image-authentik_worker_docker_image_pull: true-authentik_worker_docker_image_tag: "{{ authentik_docker_image_tag }}"-authentik_worker_docker_image: "ghcr.io/goauthentik/server:{{ authentik_worker_docker_image_tag }}"--# Ports-authentik_worker_docker_ports_defaults: []-authentik_worker_docker_ports_custom: []-authentik_worker_docker_ports: "{{ authentik_worker_docker_ports_defaults- + authentik_worker_docker_ports_custom }}"+authentik_worker_role_docker_image_pull: true+authentik_worker_role_docker_image_repo: "ghcr.io/goauthentik/server"+authentik_worker_role_docker_image_tag: "{{ lookup('role_var', '_docker_image_tag', role='authentik') }}"+authentik_worker_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='authentik_worker') }}:{{ lookup('role_var', '_docker_image_tag', role='authentik_worker') }}" # Envs-authentik_worker_docker_envs_default: "{{ authentik_docker_envs_default }}"-authentik_worker_docker_envs_custom: {}-authentik_worker_docker_envs: "{{ authentik_worker_docker_envs_default- | combine(authentik_worker_docker_envs_custom) }}"+authentik_worker_role_docker_envs_default: "{{ lookup('role_var', '_docker_envs_default', role='authentik') }}"+authentik_worker_role_docker_envs_custom: {}+authentik_worker_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='authentik_worker')+ | combine(lookup('role_var', '_docker_envs_custom', role='authentik_worker')) }}" # Commands-authentik_worker_docker_commands_default:+authentik_worker_role_docker_commands_default: - "worker"-authentik_worker_docker_commands_custom: []-authentik_worker_docker_commands: "{{ authentik_worker_docker_commands_default- + authentik_worker_docker_commands_custom }}"+authentik_worker_role_docker_commands_custom: []+authentik_worker_role_docker_commands: "{{ 
lookup('role_var', '_docker_commands_default', role='authentik_worker')+ + lookup('role_var', '_docker_commands_custom', role='authentik_worker') }}" # Volumes-authentik_worker_docker_volumes_default:- - "{{ server_appdata_path }}/authentik/media:/media"- - "{{ server_appdata_path }}/authentik/custom-templates:/templates"- - "{{ server_appdata_path }}/authentik/certs:/certs"+authentik_worker_role_docker_volumes_default:+ - "{{ authentik_role_paths_location }}/media:/media"+ - "{{ authentik_role_paths_location }}/custom-templates:/templates"+ - "{{ authentik_role_paths_location }}/certs:/certs" - "/var/run/docker.sock:/var/run/docker.sock"-authentik_worker_docker_volumes_custom: []-authentik_worker_docker_volumes: "{{ authentik_worker_docker_volumes_default- + authentik_worker_docker_volumes_custom }}"--# Devices-authentik_worker_docker_devices_default: []-authentik_worker_docker_devices_custom: []-authentik_worker_docker_devices: "{{ authentik_worker_docker_devices_default- + authentik_worker_docker_devices_custom }}"--# Hosts-authentik_worker_docker_hosts_default: {}-authentik_worker_docker_hosts_custom: {}-authentik_worker_docker_hosts: "{{ docker_hosts_common- | combine(authentik_worker_docker_hosts_default)- | combine(authentik_worker_docker_hosts_custom) }}"--# Labels-authentik_worker_docker_labels_default: {}-authentik_worker_docker_labels_custom: {}-authentik_worker_docker_labels: "{{ docker_labels_common- | combine(authentik_worker_docker_labels_default)- | combine(authentik_worker_docker_labels_custom) }}"+authentik_worker_role_docker_volumes_custom: []+authentik_worker_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='authentik_worker')+ + lookup('role_var', '_docker_volumes_custom', role='authentik_worker') }}" # Hostname-authentik_worker_docker_hostname: "{{ authentik_worker_name }}"+authentik_worker_role_docker_hostname: "{{ authentik_worker_name }}" # Networks-authentik_worker_docker_networks_alias: "{{ authentik_worker_name 
}}"-authentik_worker_docker_networks_default: []-authentik_worker_docker_networks_custom: []-authentik_worker_docker_networks: "{{ docker_networks_common- + authentik_worker_docker_networks_default- + authentik_worker_docker_networks_custom }}"--# Capabilities-authentik_worker_docker_capabilities_default: []-authentik_worker_docker_capabilities_custom: []-authentik_worker_docker_capabilities: "{{ authentik_worker_docker_capabilities_default- + authentik_worker_docker_capabilities_custom }}"--# Security Opts-authentik_worker_docker_security_opts_default: []-authentik_worker_docker_security_opts_custom: []-authentik_worker_docker_security_opts: "{{ authentik_worker_docker_security_opts_default- + authentik_worker_docker_security_opts_custom }}"+authentik_worker_role_docker_networks_alias: "{{ authentik_worker_name }}"+authentik_worker_role_docker_networks_default: []+authentik_worker_role_docker_networks_custom: []+authentik_worker_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='authentik_worker')+ + lookup('role_var', '_docker_networks_custom', role='authentik_worker') }}" # Restart Policy-authentik_worker_docker_restart_policy: unless-stopped+authentik_worker_role_docker_restart_policy: unless-stopped # State-authentik_worker_docker_state: started+authentik_worker_role_docker_state: started # Dependencies-authentik_worker_depends_on: "authentik-redis,authentik-postgres"-authentik_worker_depends_on_delay: "0"-authentik_worker_depends_on_healthchecks: "false"+authentik_worker_role_depends_on: "{{ lookup('role_var', '_postgres_name', role='authentik') }}"+authentik_worker_role_depends_on_delay: "0"+authentik_worker_role_depends_on_healthchecks: "true" # User-authentik_worker_docker_user: "root"+authentik_worker_role_docker_user: "root"
modified
roles/autobrr/defaults/main.yml
@@ -17,128 +17,86 @@ # Paths ################################ -autobrr_paths_folder: "{{ autobrr_name }}"-autobrr_paths_location: "{{ server_appdata_path }}/{{ autobrr_paths_folder }}"-autobrr_paths_config_location: "{{ autobrr_paths_location }}/config.toml"-autobrr_paths_folders_list:- - "{{ autobrr_paths_location }}"+autobrr_role_paths_folder: "{{ autobrr_name }}"+autobrr_role_paths_location: "{{ server_appdata_path }}/{{ autobrr_role_paths_folder }}"+autobrr_role_paths_config_location: "{{ autobrr_role_paths_location }}/config.toml"+autobrr_role_paths_folders_list:+ - "{{ autobrr_role_paths_location }}" ################################ # Web ################################ -autobrr_web_subdomain: "{{ autobrr_name }}"-autobrr_web_domain: "{{ user.domain }}"-autobrr_web_port: "7474"-autobrr_web_url: "{{ 'https://' + (autobrr_web_subdomain + '.' + autobrr_web_domain- if (autobrr_web_subdomain | length > 0)- else autobrr_web_domain) }}"+autobrr_role_web_subdomain: "{{ autobrr_name }}"+autobrr_role_web_domain: "{{ user.domain }}"+autobrr_role_web_port: "7474"+autobrr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='autobrr') + '.' 
+ lookup('role_var', '_web_domain', role='autobrr')+ if (lookup('role_var', '_web_subdomain', role='autobrr') | length > 0)+ else lookup('role_var', '_web_domain', role='autobrr')) }}" ################################ # DNS ################################ -autobrr_dns_record: "{{ autobrr_web_subdomain }}"-autobrr_dns_zone: "{{ autobrr_web_domain }}"-autobrr_dns_proxy: "{{ dns.proxied }}"+autobrr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='autobrr') }}"+autobrr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='autobrr') }}"+autobrr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -autobrr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-autobrr_traefik_middleware_default: "{{ traefik_default_middleware }}"-autobrr_traefik_middleware_custom: ""-autobrr_traefik_certresolver: "{{ traefik_default_certresolver }}"-autobrr_traefik_enabled: true-autobrr_traefik_api_enabled: true-autobrr_traefik_api_endpoint: "PathPrefix(`/api`)"+autobrr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+autobrr_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+autobrr_role_traefik_middleware_custom: ""+autobrr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+autobrr_role_traefik_enabled: true+autobrr_role_traefik_api_enabled: true+autobrr_role_traefik_api_endpoint: "PathPrefix(`/api`)" ################################ # Docker ################################ # Container-autobrr_docker_container: "{{ autobrr_name }}"+autobrr_role_docker_container: "{{ autobrr_name }}" # Image-autobrr_docker_image_pull: true-autobrr_docker_image_tag: "latest"-autobrr_docker_image: "ghcr.io/autobrr/autobrr:{{ autobrr_docker_image_tag }}"--# Ports-autobrr_docker_ports_defaults: []-autobrr_docker_ports_custom: []-autobrr_docker_ports: "{{ autobrr_docker_ports_defaults- + autobrr_docker_ports_custom }}"+autobrr_role_docker_image_pull: 
true+autobrr_role_docker_image_repo: "ghcr.io/autobrr/autobrr"+autobrr_role_docker_image_tag: "latest"+autobrr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='autobrr') }}:{{ lookup('role_var', '_docker_image_tag', role='autobrr') }}" # Envs-autobrr_docker_envs_default:+autobrr_role_docker_envs_default:+ AUTOBRR__LOG_PATH: "/config/logs" PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"-autobrr_docker_envs_custom: {}-autobrr_docker_envs: "{{ autobrr_docker_envs_default- | combine(autobrr_docker_envs_custom) }}"--# Commands-autobrr_docker_commands_default: []-autobrr_docker_commands_custom: []-autobrr_docker_commands: "{{ autobrr_docker_commands_default- + autobrr_docker_commands_custom }}"+autobrr_role_docker_envs_custom: {}+autobrr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='autobrr')+ | combine(lookup('role_var', '_docker_envs_custom', role='autobrr')) }}" # Volumes-autobrr_docker_volumes_default:- - "{{ autobrr_paths_location }}:/config"-autobrr_docker_volumes_custom: []-autobrr_docker_volumes: "{{ autobrr_docker_volumes_default- + autobrr_docker_volumes_custom }}"--# Devices-autobrr_docker_devices_default: []-autobrr_docker_devices_custom: []-autobrr_docker_devices: "{{ autobrr_docker_devices_default- + autobrr_docker_devices_custom }}"--# Hosts-autobrr_docker_hosts_default: {}-autobrr_docker_hosts_custom: {}-autobrr_docker_hosts: "{{ docker_hosts_common- | combine(autobrr_docker_hosts_default)- | combine(autobrr_docker_hosts_custom) }}"--# Labels-autobrr_docker_labels_default: {}-autobrr_docker_labels_custom: {}-autobrr_docker_labels: "{{ docker_labels_common- | combine(autobrr_docker_labels_default)- | combine(autobrr_docker_labels_custom) }}"+autobrr_role_docker_volumes_default:+ - "{{ autobrr_role_paths_location }}:/config"+autobrr_role_docker_volumes_custom: []+autobrr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='autobrr')+ + lookup('role_var', '_docker_volumes_custom', 
role='autobrr') }}" # Hostname-autobrr_docker_hostname: "{{ autobrr_name }}"+autobrr_role_docker_hostname: "{{ autobrr_name }}" # Networks-autobrr_docker_networks_alias: "{{ autobrr_name }}"-autobrr_docker_networks_default: []-autobrr_docker_networks_custom: []-autobrr_docker_networks: "{{ docker_networks_common- + autobrr_docker_networks_default- + autobrr_docker_networks_custom }}"--# Capabilities-autobrr_docker_capabilities_default: []-autobrr_docker_capabilities_custom: []-autobrr_docker_capabilities: "{{ autobrr_docker_capabilities_default- + autobrr_docker_capabilities_custom }}"--# Security Opts-autobrr_docker_security_opts_default: []-autobrr_docker_security_opts_custom: []-autobrr_docker_security_opts: "{{ autobrr_docker_security_opts_default- + autobrr_docker_security_opts_custom }}"+autobrr_role_docker_networks_alias: "{{ autobrr_name }}"+autobrr_role_docker_networks_default: []+autobrr_role_docker_networks_custom: []+autobrr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='autobrr')+ + lookup('role_var', '_docker_networks_custom', role='autobrr') }}" # Restart Policy-autobrr_docker_restart_policy: unless-stopped+autobrr_role_docker_restart_policy: unless-stopped # State-autobrr_docker_state: started+autobrr_role_docker_state: started
modified
roles/autobrr/tasks/main.yml
@@ -10,47 +10,26 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" - name: Check if already installed ansible.builtin.stat:- path: "{{ autobrr_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='autobrr') }}" register: autobrr_directory - name: Create directories ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" -- name: Check if `{{ autobrr_paths_config_location | basename }}` exists- ansible.builtin.stat:- path: "{{ autobrr_paths_config_location }}"- register: autobrr_config--- name: New `{{ autobrr_paths_config_location | basename }}` tasks- when: not autobrr_config.stat.exists- block:- - name: Generate sessionSecret- ansible.builtin.shell: "head /dev/urandom | tr -dc A-Za-z0-9 | head -c16"- register: autobrr_secret-- - name: Import default `{{ autobrr_paths_config_location | basename }}`- ansible.builtin.template:- src: config.toml.j2- dest: "{{ autobrr_paths_config_location }}"- owner: "{{ user.name }}"- group: "{{ user.name }}"- mode: "0664"- - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" -- name: Advise user to create Autobrr user- ansible.builtin.debug:- msg:- - "Run the following to create your user:"- - "docker exec -it {{ lookup('vars', role_name + '_name') }} sh -c 'autobrrctl --config /config create-user {{ user.name }}'"- when: not autobrr_directory.stat.exists+- name: Create 
Autobrr user+ ansible.builtin.shell: |+ docker exec -i {{ autobrr_name }} sh -c 'echo "{{ user.pass }}" | autobrrctl --config /config create-user {{ user.name }}'+ no_log: true+ when: (not autobrr_directory.stat.exists)
modified
roles/autoheal/defaults/main.yml
@@ -18,85 +18,41 @@ ################################ # Container-autoheal_docker_container: "{{ autoheal_name }}"+autoheal_role_docker_container: "{{ autoheal_name }}" # Image-autoheal_docker_image_pull: true-autoheal_docker_image_tag: "latest"-autoheal_docker_image: "willfarrell/autoheal:{{ autoheal_docker_image_tag }}"--# Ports-autoheal_docker_ports_defaults: []-autoheal_docker_ports_custom: []-autoheal_docker_ports: "{{ autoheal_docker_ports_defaults- + autoheal_docker_ports_custom }}"+autoheal_role_docker_image_pull: true+autoheal_role_docker_image_tag: "latest"+autoheal_role_docker_image: "willfarrell/autoheal:{{ autoheal_role_docker_image_tag }}" # Envs-autoheal_docker_envs_default:+autoheal_role_docker_envs_default: AUTOHEAL_CONTAINER_LABEL: "autoheal"-autoheal_docker_envs_custom: {}-autoheal_docker_envs: "{{ autoheal_docker_envs_default- | combine(autoheal_docker_envs_custom) }}"--# Commands-autoheal_docker_commands_default: []-autoheal_docker_commands_custom: []-autoheal_docker_commands: "{{ autoheal_docker_commands_default- + autoheal_docker_commands_custom }}"+autoheal_role_docker_envs_custom: {}+autoheal_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='autoheal')+ | combine(lookup('role_var', '_docker_envs_custom', role='autoheal')) }}" # Volumes-autoheal_docker_volumes_default:+autoheal_role_docker_volumes_default: - "/var/run/docker.sock:/var/run/docker.sock" - "/etc/localtime:/etc/localtime:ro"-autoheal_docker_volumes_custom: []-autoheal_docker_volumes: "{{ autoheal_docker_volumes_default- + autoheal_docker_volumes_custom }}"--# Devices-autoheal_docker_devices_default: []-autoheal_docker_devices_custom: []-autoheal_docker_devices: "{{ autoheal_docker_devices_default- + autoheal_docker_devices_custom }}"--# Hosts-autoheal_docker_hosts_default: {}-autoheal_docker_hosts_custom: {}-autoheal_docker_hosts: "{{ docker_hosts_common- | combine(autoheal_docker_hosts_default)- | combine(autoheal_docker_hosts_custom) }}"--# 
Labels-autoheal_docker_labels_default: {}-autoheal_docker_labels_custom: {}-autoheal_docker_labels: "{{ docker_labels_common- | combine(autoheal_docker_labels_default)- | combine(autoheal_docker_labels_custom) }}"+autoheal_role_docker_volumes_custom: []+autoheal_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='autoheal')+ + lookup('role_var', '_docker_volumes_custom', role='autoheal') }}" # Hostname-autoheal_docker_hostname: "{{ autoheal_name }}"+autoheal_role_docker_hostname: "{{ autoheal_name }}" # Networks-autoheal_docker_networks_alias: "{{ autoheal_name }}"-autoheal_docker_networks_default: []-autoheal_docker_networks_custom: []-autoheal_docker_networks: "{{ docker_networks_common- + autoheal_docker_networks_default- + autoheal_docker_networks_custom }}"--# Capabilities-autoheal_docker_capabilities_default: []-autoheal_docker_capabilities_custom: []-autoheal_docker_capabilities: "{{ autoheal_docker_capabilities_default- + autoheal_docker_capabilities_custom }}"--# Security Opts-autoheal_docker_security_opts_default: []-autoheal_docker_security_opts_custom: []-autoheal_docker_security_opts: "{{ autoheal_docker_security_opts_default- + autoheal_docker_security_opts_custom }}"+autoheal_role_docker_networks_alias: "{{ autoheal_name }}"+autoheal_role_docker_networks_default: []+autoheal_role_docker_networks_custom: []+autoheal_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='autoheal')+ + lookup('role_var', '_docker_networks_custom', role='autoheal') }}" # Restart Policy-autoheal_docker_restart_policy: unless-stopped+autoheal_role_docker_restart_policy: unless-stopped # State-autoheal_docker_state: started+autoheal_role_docker_state: started
modified
roles/autoscan/defaults/main.yml
@@ -17,142 +17,101 @@ # Paths ################################ -autoscan_paths_folder: "{{ autoscan_name }}"-autoscan_paths_location: "{{ server_appdata_path }}/{{ autoscan_paths_folder }}"-autoscan_paths_folders_list:- - "{{ autoscan_paths_location }}"-autoscan_paths_config_location: "{{ autoscan_paths_location }}/config.yml"+autoscan_role_paths_folder: "{{ autoscan_name }}"+autoscan_role_paths_location: "{{ server_appdata_path }}/{{ autoscan_role_paths_folder }}"+autoscan_role_paths_folders_list:+ - "{{ autoscan_role_paths_location }}"+autoscan_role_paths_config_location: "{{ autoscan_role_paths_location }}/config.yml" ################################ # Web ################################ -autoscan_web_subdomain: "{{ autoscan_name }}"-autoscan_web_domain: "{{ user.domain }}"-autoscan_web_port: "3030"-autoscan_web_url: "{{ 'https://' + (lookup('vars', autoscan_name + '_web_subdomain', default=autoscan_web_subdomain) + '.' + lookup('vars', autoscan_name + '_web_domain', default=autoscan_web_domain)- if (lookup('vars', autoscan_name + '_web_subdomain', default=autoscan_web_subdomain) | length > 0)- else lookup('vars', autoscan_name + '_web_domain', default=autoscan_web_domain)) }}"+autoscan_role_web_subdomain: "{{ autoscan_name }}"+autoscan_role_web_domain: "{{ user.domain }}"+autoscan_role_web_port: "3030"+autoscan_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='autoscan') + '.' 
+ lookup('role_var', '_web_domain', role='autoscan')+ if (lookup('role_var', '_web_subdomain', role='autoscan') | length > 0)+ else lookup('role_var', '_web_domain', role='autoscan')) }}" ################################ # DNS ################################ -autoscan_dns_record: "{{ lookup('vars', autoscan_name + '_web_subdomain', default=autoscan_web_subdomain) }}"-autoscan_dns_zone: "{{ lookup('vars', autoscan_name + '_web_domain', default=autoscan_web_domain) }}"-autoscan_dns_proxy: "{{ dns.proxied }}"+autoscan_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='autoscan') }}"+autoscan_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='autoscan') }}"+autoscan_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -autoscan_traefik_regex_middleware_string: ",{{ autoscan_name }}-replacepathregex"-autoscan_traefik_sso_middleware: ""-autoscan_traefik_middleware_default: "{{ traefik_default_middleware + autoscan_traefik_regex_middleware_string }}"-autoscan_traefik_middleware_custom: ""-autoscan_traefik_certresolver: "{{ traefik_default_certresolver }}"-autoscan_traefik_enabled: true-autoscan_traefik_api_enabled: false-autoscan_traefik_api_endpoint: ""+autoscan_role_traefik_regex_middleware_string: ",{{ autoscan_name }}-replacepathregex"+autoscan_role_traefik_sso_middleware: ""+autoscan_role_traefik_middleware_default: "{{ traefik_default_middleware + autoscan_role_traefik_regex_middleware_string }}"+autoscan_role_traefik_middleware_custom: ""+autoscan_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+autoscan_role_traefik_enabled: true+autoscan_role_traefik_api_enabled: false+autoscan_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-autoscan_docker_container: "{{ autoscan_name }}"+autoscan_role_docker_container: "{{ autoscan_name }}" # Image-autoscan_docker_image_pull: true-autoscan_docker_image_tag: 
"latest"-autoscan_docker_image: "saltydk/autoscan:{{ lookup('vars', autoscan_name + '_docker_image_tag', default=autoscan_docker_image_tag) }}"--# Ports-autoscan_docker_ports_defaults: []-autoscan_docker_ports_custom: []-autoscan_docker_ports: "{{ lookup('vars', autoscan_name + '_docker_ports_defaults', default=autoscan_docker_ports_defaults)- + lookup('vars', autoscan_name + '_docker_ports_custom', default=autoscan_docker_ports_custom) }}"+autoscan_role_docker_image_pull: true+autoscan_role_docker_image_repo: "saltydk/autoscan"+autoscan_role_docker_image_tag: "latest"+autoscan_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='autoscan') }}:{{ lookup('role_var', '_docker_image_tag', role='autoscan') }}" # Envs-autoscan_docker_envs_default:+autoscan_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"-autoscan_docker_envs_custom: {}-autoscan_docker_envs: "{{ lookup('vars', autoscan_name + '_docker_envs_default', default=autoscan_docker_envs_default)- | combine(lookup('vars', autoscan_name + '_docker_envs_custom', default=autoscan_docker_envs_custom)) }}"--# Commands-autoscan_docker_commands_default: []-autoscan_docker_commands_custom: []-autoscan_docker_commands: "{{ lookup('vars', autoscan_name + '_docker_commands_default', default=autoscan_docker_commands_default)- + lookup('vars', autoscan_name + '_docker_commands_custom', default=autoscan_docker_commands_custom) }}"+autoscan_role_docker_envs_custom: {}+autoscan_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='autoscan')+ | combine(lookup('role_var', '_docker_envs_custom', role='autoscan')) }}" # Volumes-autoscan_docker_volumes_default:- - "{{ autoscan_paths_location }}:/config"-autoscan_docker_volumes_custom: []-autoscan_docker_volumes: "{{ lookup('vars', autoscan_name + '_docker_volumes_default', default=autoscan_docker_volumes_default)- + lookup('vars', autoscan_name + '_docker_volumes_custom', default=autoscan_docker_volumes_custom) }}"--# 
Devices-autoscan_docker_devices_default: []-autoscan_docker_devices_custom: []-autoscan_docker_devices: "{{ lookup('vars', autoscan_name + '_docker_devices_default', default=autoscan_docker_devices_default)- + lookup('vars', autoscan_name + '_docker_devices_custom', default=autoscan_docker_devices_custom) }}"--# Hosts-autoscan_docker_hosts_default: {}-autoscan_docker_hosts_custom: {}-autoscan_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', autoscan_name + '_docker_hosts_default', default=autoscan_docker_hosts_default))- | combine(lookup('vars', autoscan_name + '_docker_hosts_custom', default=autoscan_docker_hosts_custom)) }}"+autoscan_role_docker_volumes_default:+ - "{{ autoscan_role_paths_location }}:/config"+autoscan_role_docker_volumes_custom: []+autoscan_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='autoscan')+ + lookup('role_var', '_docker_volumes_custom', role='autoscan') }}" # Labels-autoscan_docker_labels_default:+autoscan_role_docker_labels_default: - '{ "traefik.http.middlewares.{{ traefik_router }}-replacepathregex.replacepathregex.regex": "^/$" }' - '{ "traefik.http.middlewares.{{ traefik_router }}-replacepathregex.replacepathregex.replacement": "/triggers/manual" }' - '{ "traefik.http.routers.{{ traefik_router }}-triggers.entrypoints": "{{ traefik_entrypoint_websecure }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-triggers.service": "{{ traefik_router }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-triggers.rule": "Host(`{{ traefik_host }}`) && PathPrefix(`/triggers`)" }'- - '{ "traefik.http.routers.{{ traefik_router }}-triggers.priority": "{{ lookup("vars", traefik_role_var + "_traefik_priority", default=lookup("vars", role_name + "_traefik_priority", default="40")) }}" }'- - '{ "traefik.http.routers.{{ traefik_router }}-triggers.tls.certresolver": "{{ lookup("vars", traefik_role_var + "_traefik_certresolver", default=lookup("vars", role_name + "_traefik_certresolver", 
default=traefik_default_certresolver)) }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-triggers.priority": "{{ lookup("role_var", "_traefik_priority", role="autoscan", default="40") }}" }'+ - '{ "traefik.http.routers.{{ traefik_router }}-triggers.tls.certresolver": "{{ lookup("role_var", "_traefik_certresolver", role="autoscan", default=traefik_default_certresolver) }}" }' - '{ "traefik.http.routers.{{ traefik_router }}-triggers.tls.options": "securetls@file" }'- - '{ "traefik.http.routers.{{ traefik_router }}-triggers.middlewares": "{{ traefik_middleware | regex_replace(autoscan_traefik_regex_middleware_string) }}" }'-autoscan_docker_labels_custom: {}-autoscan_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', autoscan_name + '_docker_labels_default', default=autoscan_docker_labels_default))- | combine(lookup('vars', autoscan_name + '_docker_labels_custom', default=autoscan_docker_labels_custom)) }}"+ - '{ "traefik.http.routers.{{ traefik_router }}-triggers.middlewares": "{{ traefik_middleware | regex_replace(autoscan_role_traefik_regex_middleware_string) }}" }'+autoscan_role_docker_labels_custom: {}+autoscan_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='autoscan')+ | combine(lookup('role_var', '_docker_labels_custom', role='autoscan')) }}" # Hostname-autoscan_docker_hostname: "{{ autoscan_name }}"--# Network Mode-autoscan_docker_network_mode_default: "{{ docker_networks_name_common }}"-autoscan_docker_network_mode: "{{ lookup('vars', autoscan_name + '_docker_network_mode_default', default=autoscan_docker_network_mode_default) }}"+autoscan_role_docker_hostname: "{{ autoscan_name }}" # Networks-autoscan_docker_networks_alias: "{{ autoscan_name }}"-autoscan_docker_networks_default: []-autoscan_docker_networks_custom: []-autoscan_docker_networks: "{{ docker_networks_common- + lookup('vars', autoscan_name + '_docker_networks_default', default=autoscan_docker_networks_default)- + lookup('vars', autoscan_name + 
'_docker_networks_custom', default=autoscan_docker_networks_custom) }}"--# Capabilities-autoscan_docker_capabilities_default: []-autoscan_docker_capabilities_custom: []-autoscan_docker_capabilities: "{{ lookup('vars', autoscan_name + '_docker_capabilities_default', default=autoscan_docker_capabilities_default)- + lookup('vars', autoscan_name + '_docker_capabilities_custom', default=autoscan_docker_capabilities_custom) }}"--# Security Opts-autoscan_docker_security_opts_default: []-autoscan_docker_security_opts_custom: []-autoscan_docker_security_opts: "{{ lookup('vars', autoscan_name + '_docker_security_opts_default', default=autoscan_docker_security_opts_default)- + lookup('vars', autoscan_name + '_docker_security_opts_custom', default=autoscan_docker_security_opts_custom) }}"+autoscan_role_docker_networks_alias: "{{ autoscan_name }}"+autoscan_role_docker_networks_default: []+autoscan_role_docker_networks_custom: []+autoscan_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='autoscan')+ + lookup('role_var', '_docker_networks_custom', role='autoscan') }}" # Restart Policy-autoscan_docker_restart_policy: unless-stopped+autoscan_role_docker_restart_policy: unless-stopped # State-autoscan_docker_state: started+autoscan_role_docker_state: started
modified
roles/autoscan/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -21,7 +21,7 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Import Settings task- ansible.builtin.import_tasks: "subtasks/settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"
modified
roles/autoscan/tasks/subtasks/settings.yml
@@ -7,12 +7,12 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Settings | Check if `{{ autoscan_paths_config_location | basename }}` exists+- name: Settings | Check if `{{ lookup('role_var', '_paths_config_location', role='autoscan') | basename }}` exists ansible.builtin.stat:- path: "{{ autoscan_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='autoscan') }}" register: autoscan_config -- name: Settings | New `{{ autoscan_paths_config_location | basename }}` tasks+- name: Settings | New `{{ lookup('role_var', '_paths_config_location', role='autoscan') | basename }}` tasks when: (not autoscan_config.stat.exists) block: - name: Settings | Get Instance Info@@ -21,10 +21,10 @@ get_info_list: - plex - - name: Settings | Import default `{{ autoscan_paths_config_location | basename }}`+ - name: Settings | Import default `{{ lookup('role_var', '_paths_config_location', role='autoscan') | basename }}` ansible.builtin.template: src: config.yml.j2- dest: "{{ autoscan_paths_config_location }}"+ dest: "{{ lookup('role_var', '_paths_config_location', role='autoscan') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/autoscan/templates/config.yml.j2
@@ -41,12 +41,12 @@ # rewrite inotify path to unified filesystem rewrite:- - from: ^/mnt/local/Media/+ - from: {{ '^' + server_local_folder_path }}/Media/ to: /mnt/unionfs/Media/ # Local filesystem paths to monitor paths:- - path: /mnt/local/Media+ - path: {{ server_local_folder_path }}/Media sonarr: {% for instance in sonarr_instances %}
modified
roles/backup/defaults/main.yml
@@ -21,8 +21,8 @@ ################################ backup_size_exclude_folders:- - "/opt/plex/Library/Application Support/Plex Media Server/Cache/PhotoTranscoder"- - "/opt/plex/Library/Application Support/Plex Media Server/Cache/Transcode"+ - "{{ server_appdata_path }}/plex/Library/Application Support/Plex Media Server/Cache/PhotoTranscoder"+ - "{{ server_appdata_path }}/plex/Library/Application Support/Plex Media Server/Cache/Transcode" ################################ # Notifications@@ -57,5 +57,5 @@ ################################ snapshot_type: ""-backup_opt_path: "/opt/"+backup_opt_path: "{{ server_appdata_path }}/" use_snapshot: false
modified
roles/backup/tasks/main.yml
@@ -22,6 +22,11 @@ - "Backup Role cannot run without a tag." - "Use backup or backup2 tag." +- name: Import cloudplow role variables+ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/variables/import_role_vars.yml"+ vars:+ import_role_name: cloudplow+ - name: Backup block: - name: "Set 'backup_success' variable"@@ -29,10 +34,10 @@ backup_success: false - name: Sanity Check- ansible.builtin.import_tasks: "sanity_check.yml"+ ansible.builtin.include_tasks: "sanity_check.yml" - name: Variables- ansible.builtin.import_tasks: "variables.yml"+ ansible.builtin.include_tasks: "variables.yml" tags: - set-backup - unset-backup@@ -41,14 +46,14 @@ - wipe-restore-service - name: Import Restore Service Cleanup- ansible.builtin.import_tasks: "wipe_restore_service.yml"+ ansible.builtin.include_tasks: "wipe_restore_service.yml" when: ('wipe-restore-service' in ansible_run_tags) and not ('backup' in ansible_run_tags) tags: - wipe-restore-service - name: Cron- ansible.builtin.import_tasks: "cron.yml"- when: (['set-backup', 'unset-backup'] | intersect(ansible_run_tags)) and not ('backup' in ansible_run_tags)+ ansible.builtin.include_tasks: "cron.yml"+ when: ((['set-backup', 'unset-backup'] | intersect(ansible_run_tags)) | length > 0) and not ('backup' in ansible_run_tags) tags: - set-backup - unset-backup@@ -62,7 +67,7 @@ start_time: "{{ start_time_lookup.stdout }}" - name: Snapshot- ansible.builtin.import_tasks: "snapshot.yml"+ ansible.builtin.include_tasks: "snapshot.yml" - name: "Notify | Saltbox Backup: Started Saltbox backup task" ansible.builtin.include_role:@@ -120,12 +125,12 @@ - "/home/{{ user.name }}/logs" - "{{ backup.local.destination }}" - "{{ backup.local.destination }}/opt"- - "/opt/systemd-backup"- - "/opt/crontab-backup"+ - "{{ server_appdata_path }}/systemd-backup"+ - "{{ server_appdata_path }}/crontab-backup" # Check if there is enough space in the local backup destination- - name: Get size of opt folder- ansible.builtin.shell: "du -sk /opt || true"+ - 
name: Get size of {{ server_appdata_path }} folder+ ansible.builtin.shell: "du -sk {{ server_appdata_path }} || true" register: estimated_backup_size - name: Check existence of exclude folders@@ -235,13 +240,13 @@ else (playbook_dir + '/roles/backup/files/backup_excludes_list.txt') }}" - name: Saltbox Restore Service- ansible.builtin.import_tasks: "restore_service.yml"+ ansible.builtin.include_tasks: "restore_service.yml" when: restore_service_enabled tags: - restore-service - saltbox-restore-service - - name: "Synchronize '/etc/systemd/system' to '/opt/systemd-backup' for inclusion in backup"+ - name: "Synchronize '/etc/systemd/system' to '{{ server_appdata_path }}/systemd-backup' for inclusion in backup" ansible.builtin.shell: | /usr/bin/rsync \ --delay-updates \@@ -254,13 +259,13 @@ --exclude='saltbox_managed_*' \ --include='*.service' \ --include='*.mount' \- /etc/systemd/system/* /opt/systemd-backup/+ /etc/systemd/system/* {{ server_appdata_path }}/systemd-backup/ args: executable: /bin/bash ignore_errors: true - - name: "Copying crontabs to '/opt/crontab-backup' for inclusion in backup"- ansible.builtin.shell: "cp -f /var/spool/cron/crontabs/* /opt/crontab-backup"+ - name: "Copying crontabs to '{{ server_appdata_path }}/crontab-backup' for inclusion in backup"+ ansible.builtin.shell: "cp -f /var/spool/cron/crontabs/* {{ server_appdata_path }}/crontab-backup" ignore_errors: true - name: "Reset permissions of folders"@@ -272,61 +277,84 @@ mode: "0775" recurse: true with_items:- - "/opt/systemd-backup"- - "/opt/crontab-backup"-- # Stop Containers-- - name: "Gather list of running Docker containers"- ansible.builtin.shell: "docker ps --format '{{ '{{' }} .Names{{ '}}' }}' --filter label=com.github.saltbox.saltbox_managed=true | xargs echo -n"- register: docker_containers- ignore_errors: true-- - name: Set 'docker_containers' variable- ansible.builtin.set_fact:- docker_containers: "{{ docker_containers.stdout if (docker_containers is success) else '' }}"-- - name: 
Docker container tasks- when: (docker_containers | trim | length > 0)+ - "{{ server_appdata_path }}/systemd-backup"+ - "{{ server_appdata_path }}/crontab-backup"++ - name: Populate Service Facts+ ansible.builtin.service_facts:++ - name: Set docker_service_running+ ansible.builtin.set_fact:+ docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"++ - name: Stop Docker Container Tasks+ when: docker_service_running block:- - name: Convert Docker containers string into a list+ - name: "Gather Docker container information"+ community.docker.docker_host_info:+ containers: true+ containers_filters:+ label:+ - "com.github.saltbox.saltbox_managed=true"+ status:+ - "running"+ register: docker_info++ - name: Extract and process container names ansible.builtin.set_fact:- docker_containers: "{{ docker_containers.split() | reject('in', gluetun_instances | default(['gluetun'])) | sort }}"-- - name: Filter out ignored apps from Docker containers list+ docker_containers: "{{ docker_info.containers | map(attribute='Names') | map('first') | map('regex_replace', '^/', '') | reject('in', gluetun_instances | default(['gluetun'])) | sort }}"+ when:+ - (docker_info is success)+ - (docker_info.containers is defined)+ - (docker_info.containers | length > 0)++ - name: Set empty list if no containers found ansible.builtin.set_fact:- docker_containers: "{{ docker_containers | difference(reverse_proxy_apps + torrent_apps + backup_ignore_containers) }}"- ignore_containers: "{{ reverse_proxy_apps + torrent_apps + backup_ignore_containers }}"-- - name: Convert Docker containers list back to string- ansible.builtin.set_fact:- docker_containers_string: "{{ docker_containers | join(' ') }}"- ignore_query_string: >-- {% if docker_containers | length > 0 and ignore_containers | length > 0 %}- {%- set ignore_params = [] -%}- {% for container in ignore_containers %}- {%- set _ = 
ignore_params.append('ignore=' ~ container) -%}- {%- endfor -%}- {{ ignore_params | join('&') }}- {%- else -%}- {% endif %}-- - name: Stop Saltbox Docker containers- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_saltbox_docker_containers.yml"- vars:- _query_var: "{{ '?' + ignore_query_string if (ignore_query_string | length > 0) else '' }}"-- - name: "Stop all running Docker containers"- ansible.builtin.shell: "docker stop {{ docker_containers_string }}"- ignore_errors: true- when: (docker_containers_string | trim | length > 0)-- - name: "Notify | Saltbox Backup: Stopped Docker containers"- ansible.builtin.include_role:- name: notify- vars:- message: "{{ backup_instance }} Backup: Stopped Docker containers."- when: backup_notify_stop_docker_containers+ docker_containers: []+ when: (docker_info is not success) or (docker_info.containers is not defined) or (docker_info.containers | length == 0)++ - name: Docker container tasks+ when: (docker_containers | length > 0)+ block:+ - name: Build list of containers to ignore+ ansible.builtin.set_fact:+ ignore_containers: "{{ reverse_proxy_apps + torrent_apps + backup_ignore_containers }}"++ - name: Filter out ignored apps from Docker containers list+ ansible.builtin.set_fact:+ docker_containers: "{{ docker_containers | difference(ignore_containers) }}"++ - name: Convert Docker containers list back to string+ ansible.builtin.set_fact:+ docker_containers_string: "{{ docker_containers | join(' ') }}"+ ignore_query_string: >-+ {% if docker_containers | length > 0 and ignore_containers | length > 0 %}+ {%- set ignore_params = [] -%}+ {% for container in ignore_containers %}+ {%- set _ = ignore_params.append('ignore=' ~ container) -%}+ {%- endfor -%}+ {{ ignore_params | join('&') }}+ {%- else -%}+ {% endif %}++ - name: Stop Saltbox Docker containers+ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_saltbox_docker_containers.yml"+ vars:+ _query_var: "{{ '?' 
+ ignore_query_string+ if (ignore_query_string | length > 0)+ else '' }}"++ - name: "Stop all running Docker containers"+ ansible.builtin.shell: "docker stop {{ docker_containers_string }}"+ ignore_errors: true+ when: (docker_containers_string | trim | length > 0)++ - name: "Notify | Saltbox Backup: Stopped Docker containers"+ ansible.builtin.include_role:+ name: notify+ vars:+ message: "{{ backup_instance }} Backup: Stopped Docker containers."+ when: backup_notify_stop_docker_containers # Custom User Files @@ -342,16 +370,11 @@ - "{{ backup_user_defined_files }}" when: (backup_user_defined_files | length > 0) - # Services-- - name: Populate Service Facts- ansible.builtin.service_facts:-- # Stop Cloudplow+ # Stop Cloudplow - name: Check if 'cloudplow.service' exists ansible.builtin.stat:- path: "/etc/systemd/system/cloudplow.service"+ path: "/etc/systemd/system/{{ cloudplow_service_name }}.service" register: cloudplow_service - name: Stop 'cloudplow' service block@@ -359,11 +382,11 @@ block: - name: Get 'cloudplow' service state ansible.builtin.set_fact:- cloudplow_service_running: "{{ (services['cloudplow.service'] is defined) and (services['cloudplow.service']['state'] == 'running') }}"+ cloudplow_service_running: "{{ (ansible_facts['services'][cloudplow_service_name + '.service'] is defined) and (ansible_facts['services'][cloudplow_service_name + '.service']['state'] == 'running') }}" - name: Stop 'cloudplow' service ansible.builtin.systemd_service:- name: cloudplow+ name: "{{ cloudplow_service_name }}" state: stopped when: cloudplow_service_running @@ -391,7 +414,7 @@ # Start Docker containers when snapshot is enabled - name: Snapshot | Start Docker containers- when: use_snapshot+ when: docker_service_running and use_snapshot block: - name: "Snapshot | Wait for 5 seconds before starting Docker containers" ansible.builtin.wait_for:@@ -510,7 +533,7 @@ # Start Docker containers when snapshot is not enabled - name: Start Docker Containers- when: (not use_snapshot)+ 
when: docker_service_running and (not use_snapshot) block: - name: "Wait for 5 seconds before starting Docker containers" ansible.builtin.wait_for:@@ -535,7 +558,7 @@ - name: "Wait for 10 seconds before uploads" ansible.builtin.wait_for: timeout: 10- when: backup.rclone.enable or backup.rsync.enable+ when: backup_rclone_enabled or backup_rsync_enabled - name: "Reset folder ownership of '{{ backup.local.destination }}/'" ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} {{ backup.local.destination }}/"@@ -561,7 +584,7 @@ become_user: "{{ user.name }}" register: rclone_timestamp ignore_errors: true- when: backup.rclone.enable+ when: backup_rclone_enabled - name: Define Archive List ansible.builtin.set_fact:@@ -592,13 +615,13 @@ register: rclone_move failed_when: rclone_move.rc > 3 ignore_errors: true- when: backup.rclone.enable and (rclone_timestamp is defined) and ('Failed' not in rclone_timestamp.stderr)+ when: backup_rclone_enabled and (rclone_timestamp is defined) and ('Failed' not in rclone_timestamp.stderr) loop: "{{ backup_archive_list + (backup_user_defined_files | map('basename') | list) }}" - name: "Wait for 5 seconds before uploading" ansible.builtin.wait_for: timeout: 5- when: backup.rclone.enable or backup.rsync.enable+ when: backup_rclone_enabled or backup_rsync_enabled - name: "Use rclone to upload backup to '{{ backup.rclone.destination }}'" ansible.builtin.shell: |@@ -608,21 +631,21 @@ {{ lookup('vars', 'backup_' + backup.rclone.template + '_template', default='') }} \ --stats=30s \ --bwlimit={{ backup_rclone_upload_speed_limit }} \- {{ '--bind=' + ansible_default_ipv4.address if mounts.ipv4_only else '' }} \+ {{ '--bind=' + ansible_facts['default_ipv4']['address'] if rclone_mounts_ipv4_only else '' }} \ -vv \ --log-file='{{ playbook_dir }}/backup_rclone.log' \ '{{ backup.local.destination }}' '{{ backup.rclone.destination }}' environment: "{{ backup_rclone_env }}" become: true become_user: "{{ user.name }}"- when: 
backup.rclone.enable+ when: backup_rclone_enabled - name: "Notify | Saltbox Backup: Rclone uploaded backup to '{{ backup.rclone.destination }}'" ansible.builtin.include_role: name: notify vars: message: "{{ backup_instance }} Backup: Rclone uploaded backup to '{{ backup.rclone.destination }}'."- when: backup.rclone.enable and backup_notify_rclone_complete+ when: backup_rclone_enabled and backup_notify_rclone_complete - name: "Use rsync to upload backup to '{{ backup.rsync.destination }}'" ansible.posix.synchronize:@@ -633,14 +656,14 @@ dest_port: "{{ backup.rsync.port }}" become: true become_user: "{{ user.name }}"- when: backup.rsync.enable+ when: backup_rsync_enabled - name: "Notify | Saltbox Backup: Rsync uploaded backup to '{{ backup.rsync.destination }}'" ansible.builtin.include_role: name: notify vars: message: "{{ backup_instance }} Backup: Rsync uploaded backup to '{{ backup.rsync.destination }}'."- when: backup.rsync.enable and backup_notify_rsync_complete+ when: backup_rsync_enabled and backup_notify_rsync_complete - name: Get Current Time ansible.builtin.shell: "date \"+%s\""@@ -662,7 +685,7 @@ - name: "Start 'cloudplow' service" ansible.builtin.systemd_service:- name: cloudplow+ name: "{{ cloudplow_service_name }}" state: started when: (cloudplow_service is defined) and cloudplow_service.stat.exists and cloudplow_service_running @@ -670,7 +693,7 @@ ansible.builtin.file: path: "{{ backup.local.destination }}" state: absent- when: (dir_files2.matched | int != 0) and (not backup.local.enable)+ when: (dir_files2.matched | int != 0) and (not backup_local_enabled) - name: "Set 'backup_success' variable" ansible.builtin.set_fact:@@ -709,7 +732,7 @@ ignore_errors: true - name: Start Docker Containers- when: (not use_snapshot)+ when: docker_service_running and (not use_snapshot) block: - name: "Wait for 5 seconds before starting Docker containers" ansible.builtin.wait_for:@@ -726,7 +749,7 @@ - name: "Start 'cloudplow' service" ansible.builtin.systemd_service:- 
name: cloudplow+ name: "{{ cloudplow_service_name }}" state: started when: (cloudplow_service is defined) and cloudplow_service.stat.exists and cloudplow_service_running @@ -751,7 +774,7 @@ ignore_errors: true - name: Backup Cleanup Block- when: backup.rclone.enable and backup_cleanup_enabled and backup_success+ when: backup_rclone_enabled and backup_cleanup_enabled and backup_success block: - name: Determine number of existing backups ansible.builtin.shell: >
modified
roles/backup/tasks/restore_service.yml
@@ -123,7 +123,7 @@ {{ files_upload.results | selectattr('stdout', 'search', 'too large') | map(attribute='item') | map('regex_replace', '^/tmp/restore_service/|.enc$', '') | list | sort(case_sensitive=False) | join(', ') }} - - name: Restore Service | Print error mesage when config file(s) were too large to upload+ - name: Restore Service | Print error message when config file(s) were too large to upload ansible.builtin.debug: msg: "The following encrypted config file(s) were too large to upload to the Saltbox Restore Service: '{{ files_too_large_to_upload_list | trim }}'" when: (files_too_large_to_upload_list | trim | length > 0)
modified
roles/backup/tasks/sanity_check.yml
@@ -15,7 +15,7 @@ # Age in hours - name: "Get age of 'backup.lock' file" ansible.builtin.set_fact:- backup_lock_age: "{{ ((ansible_date_time.epoch | float - backup_lock.stat.mtime) / 3600) | int }}"+ backup_lock_age: "{{ ((ansible_facts['date_time']['epoch'] | float - backup_lock.stat.mtime) / 3600) | int }}" when: backup_lock.stat.exists # Delete if older than 2 hours.
modified
roles/backup/tasks/snapshot.yml
@@ -10,22 +10,22 @@ - name: "Snapshot | Determine '/' filesystem type" ansible.builtin.set_fact: root_fstype: "{{ item.fstype }}"- when: backup.misc.snapshot and (item.mount == '/')+ when: backup_snapshot_enabled and (item.mount == '/') with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" -- name: "Snapshot | Determine '/opt' filesystem type"+- name: "Snapshot | Determine '{{ server_appdata_path }}' filesystem type" ansible.builtin.set_fact: opt_fstype: "{{ item.fstype }}"- when: backup.misc.snapshot and (item.mount == '/opt')+ when: backup_snapshot_enabled and (item.mount == server_appdata_path) with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" # BTRFS - name: Snapshot | BTRFS specific tasks when:- - backup.misc.snapshot+ - backup_snapshot_enabled - ((root_fstype is defined) and (root_fstype == 'btrfs')) or ((opt_fstype is defined) and (opt_fstype == 'btrfs')) block: - name: "Snapshot | Set general BTRFS variables"@@ -37,14 +37,14 @@ ansible.builtin.set_fact: backup_snapshot_source_path: "/" backup_snapshot_destination_path: "/btrfs/snapshots/root"- backup_opt_path: "/btrfs/snapshots/root/opt/"+ backup_opt_path: "/btrfs/snapshots/root{{ server_appdata_path }}/" when: (root_fstype is defined) and (root_fstype == 'btrfs') - - name: "Snapshot | Set '/opt' BTRFS variables"+ - name: "Snapshot | Set '{{ server_appdata_path }}' BTRFS variables" ansible.builtin.set_fact:- backup_snapshot_source_path: "/opt"- backup_snapshot_destination_path: "/opt/snapshots/opt"- backup_opt_path: "/opt/snapshots/opt/"+ backup_snapshot_source_path: "{{ server_appdata_path }}"+ backup_snapshot_destination_path: "{{ server_appdata_path }}/snapshots/opt"+ backup_opt_path: "{{ server_appdata_path }}/snapshots/opt/" when: (opt_fstype is defined) and (opt_fstype == 'btrfs') - name: Snapshot | Check if BTRFS snapshot is mounted
modified
roles/backup2/tasks/main.yml
@@ -7,6 +7,11 @@ # GNU General Public License v3.0 # ######################################################################### ---+- name: Import cloudplow role variables+ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/variables/import_role_vars.yml"+ vars:+ import_role_name: cloudplow+ - name: Backup block: - name: "Set 'backup2_success' variable"@@ -14,10 +19,10 @@ backup2_success: false - name: Sanity Check- ansible.builtin.import_tasks: "sanity_check.yml"+ ansible.builtin.include_tasks: "sanity_check.yml" - name: Variables- ansible.builtin.import_tasks: "variables.yml"+ ansible.builtin.include_tasks: "variables.yml" tags: - set-backup2 - unset-backup2@@ -25,8 +30,8 @@ - saltbox-restore-service2 - name: Cron- ansible.builtin.import_tasks: "cron.yml"- when: (['set-backup2', 'unset-backup2'] | intersect(ansible_run_tags)) and not ('backup2' in ansible_run_tags)+ ansible.builtin.include_tasks: "cron.yml"+ when: ((['set-backup2', 'unset-backup2'] | intersect(ansible_run_tags)) | length > 0) and not ('backup2' in ansible_run_tags) tags: - set-backup2 - unset-backup2@@ -40,7 +45,7 @@ start_time: "{{ start_time_lookup.stdout }}" - name: Snapshot- ansible.builtin.import_tasks: "snapshot.yml"+ ansible.builtin.include_tasks: "snapshot.yml" - name: "Notify | Saltbox Backup: Started Saltbox backup task" ansible.builtin.include_role:@@ -97,8 +102,8 @@ with_items: - "/home/{{ user.name }}/logs" - "{{ backup.local.destination }}"- - "/opt/systemd-backup"- - "/opt/crontab-backup"+ - "{{ server_appdata_path }}/systemd-backup"+ - "{{ server_appdata_path }}/crontab-backup" # Backup config files - name: "Copy files to '{{ backup.local.destination }}'"@@ -144,13 +149,13 @@ else (playbook_dir + '/roles/backup2/files/backup_excludes_list.txt') }}" - name: saltbox Restore Service- ansible.builtin.import_tasks: "restore_service.yml"+ ansible.builtin.include_tasks: "restore_service.yml" when: restore_service_enabled tags: - restore-service2 - saltbox-restore-service2 - - 
name: "Synchronize '/etc/systemd/system' to '/opt/systemd-backup' for inclusion in backup"+ - name: "Synchronize '/etc/systemd/system' to '{{ server_appdata_path }}/systemd-backup' for inclusion in backup" ansible.builtin.shell: | /usr/bin/rsync \ --delay-updates \@@ -163,13 +168,13 @@ --exclude='saltbox_managed_*' \ --include='*.service' \ --include='*.mount' \- /etc/systemd/system/* /opt/systemd-backup/+ /etc/systemd/system/* {{ server_appdata_path }}/systemd-backup/ args: executable: /bin/bash ignore_errors: true - - name: "Copying crontabs to '/opt/crontab-backup' for inclusion in backup"- ansible.builtin.shell: "cp -f /var/spool/cron/crontabs/* /opt/crontab-backup"+ - name: "Copying crontabs to '{{ server_appdata_path }}/crontab-backup' for inclusion in backup"+ ansible.builtin.shell: "cp -f /var/spool/cron/crontabs/* {{ server_appdata_path }}/crontab-backup" ignore_errors: true - name: "Reset permissions of folders"@@ -181,61 +186,84 @@ mode: "0775" recurse: true with_items:- - "/opt/systemd-backup"- - "/opt/crontab-backup"-- # Stop Containers-- - name: "Gather list of running Docker containers"- ansible.builtin.shell: "docker ps --format '{{ '{{' }} .Names{{ '}}' }}' --filter label=com.github.saltbox.saltbox_managed=true | xargs echo -n"- register: docker_containers- ignore_errors: true-- - name: Set 'docker_containers' variable- ansible.builtin.set_fact:- docker_containers: "{{ docker_containers.stdout if (docker_containers is success) else '' }}"-- - name: Docker container tasks- when: (docker_containers | trim | length > 0)- block:- - name: Convert Docker containers string into a list+ - "{{ server_appdata_path }}/systemd-backup"+ - "{{ server_appdata_path }}/crontab-backup"++ - name: Populate Service Facts+ ansible.builtin.service_facts:++ - name: Set docker_service_running+ ansible.builtin.set_fact:+ docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 
'running') }}"++ - name: Stop Docker Container Tasks+ when: docker_service_running+ block:+ - name: "Gather Docker container information"+ community.docker.docker_host_info:+ containers: true+ containers_filters:+ label:+ - "com.github.saltbox.saltbox_managed=true"+ status:+ - "running"+ register: docker_info++ - name: Extract and process container names ansible.builtin.set_fact:- docker_containers: "{{ docker_containers.split() | reject('in', gluetun_instances | default(['gluetun'])) | sort }}"-- - name: Filter out ignored apps from Docker containers list+ docker_containers: "{{ docker_info.containers | map(attribute='Names') | map('first') | map('regex_replace', '^/', '') | reject('in', gluetun_instances | default(['gluetun'])) | sort }}"+ when:+ - (docker_info is success)+ - (docker_info.containers is defined)+ - (docker_info.containers | length > 0)++ - name: Set empty list if no containers found ansible.builtin.set_fact:- docker_containers: "{{ docker_containers | difference(reverse_proxy_apps + torrent_apps + backup_ignore_containers) }}"- ignore_containers: "{{ reverse_proxy_apps + torrent_apps + backup_ignore_containers }}"-- - name: Convert Docker containers list back to string- ansible.builtin.set_fact:- docker_containers_string: "{{ docker_containers | join(' ') }}"- ignore_query_string: >-- {% if docker_containers | length > 0 and ignore_containers | length > 0 %}- {%- set ignore_params = [] -%}- {% for container in ignore_containers %}- {%- set _ = ignore_params.append('ignore=' ~ container) -%}- {%- endfor -%}- {{ ignore_params | join('&') }}- {%- else -%}- {% endif %}-- - name: Stop Saltbox Docker containers- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_saltbox_docker_containers.yml"- vars:- _query_var: "{{ '?' 
+ ignore_query_string if (ignore_query_string | length > 0) else '' }}"-- - name: "Stop all running Docker containers"- ansible.builtin.shell: "docker stop {{ docker_containers_string }}"- ignore_errors: true- when: (docker_containers_string | trim | length > 0)-- - name: "Notify | Saltbox Backup: Stopped Docker containers"- ansible.builtin.include_role:- name: notify- vars:- message: "{{ backup_instance }} Backup: Stopped Docker containers."- when: backup_notify_stop_docker_containers+ docker_containers: []+ when: (docker_info is not success) or (docker_info.containers is not defined) or (docker_info.containers | length == 0)++ - name: Docker container tasks+ when: (docker_containers | length > 0)+ block:+ - name: Build list of containers to ignore+ ansible.builtin.set_fact:+ ignore_containers: "{{ reverse_proxy_apps + torrent_apps + backup2_ignore_containers }}"++ - name: Filter out ignored apps from Docker containers list+ ansible.builtin.set_fact:+ docker_containers: "{{ docker_containers | difference(ignore_containers) }}"++ - name: Convert Docker containers list back to string+ ansible.builtin.set_fact:+ docker_containers_string: "{{ docker_containers | join(' ') }}"+ ignore_query_string: >-+ {% if docker_containers | length > 0 and ignore_containers | length > 0 %}+ {%- set ignore_params = [] -%}+ {% for container in ignore_containers %}+ {%- set _ = ignore_params.append('ignore=' ~ container) -%}+ {%- endfor -%}+ {{ ignore_params | join('&') }}+ {%- else -%}+ {% endif %}++ - name: Stop Saltbox Docker containers+ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_saltbox_docker_containers.yml"+ vars:+ _query_var: "{{ '?' 
+ ignore_query_string+ if (ignore_query_string | length > 0)+ else '' }}"++ - name: "Stop all running Docker containers"+ ansible.builtin.shell: "docker stop {{ docker_containers_string }}"+ ignore_errors: true+ when: (docker_containers_string | trim | length > 0)++ - name: "Notify | Saltbox Backup: Stopped Docker containers"+ ansible.builtin.include_role:+ name: notify+ vars:+ message: "{{ backup_instance }} Backup: Stopped Docker containers."+ when: backup_notify_stop_docker_containers # Custom User Files @@ -252,16 +280,11 @@ ignore_errors: true when: (backup2_user_defined_files | length > 0) - # Services-- - name: Populate Service Facts- ansible.builtin.service_facts:-- # Stop Cloudplow+ # Stop Cloudplow - name: Check if 'cloudplow.service' exists ansible.builtin.stat:- path: "/etc/systemd/system/cloudplow.service"+ path: "/etc/systemd/system/{{ cloudplow_service_name }}.service" register: cloudplow_service - name: Stop 'cloudplow' service block@@ -269,18 +292,18 @@ block: - name: Get 'cloudplow' service state ansible.builtin.set_fact:- cloudplow_service_running: "{{ (services['cloudplow.service'] is defined) and (services['cloudplow.service']['state'] == 'running') }}"+ cloudplow_service_running: "{{ (ansible_facts['services'][cloudplow_service_name + '.service'] is defined) and (ansible_facts['services'][cloudplow_service_name + '.service']['state'] == 'running') }}" - name: Stop 'cloudplow' service ansible.builtin.systemd_service:- name: cloudplow+ name: "{{ cloudplow_service_name }}" state: stopped when: cloudplow_service_running # Create snapshot - name: Create Snapshot- when: use_snapshot+ when: docker_service_running and use_snapshot block: - name: "Snapshot | Wait for 5 seconds before creating snapshot" ansible.builtin.wait_for:@@ -353,7 +376,7 @@ become_user: "{{ user.name }}" register: rclone_timestamp ignore_errors: true- when: backup.rclone.enable+ when: backup_rclone_enabled - name: Define Archive List ansible.builtin.set_fact:@@ -384,7 +407,7 @@ 
register: rclone_move failed_when: rclone_move.rc > 3 ignore_errors: true- when: backup.rclone.enable and (rclone_timestamp is defined) and ('Failed' not in rclone_timestamp.stderr)+ when: backup_rclone_enabled and (rclone_timestamp is defined) and ('Failed' not in rclone_timestamp.stderr) loop: "{{ backup_archive_list + (backup2_user_defined_files | map('basename') | list) }}" - name: "Use rclone to upload backup to '{{ backup.rclone.destination }}'"@@ -393,7 +416,7 @@ environment: "{{ backup2_rclone_env }}" become: true become_user: "{{ user.name }}"- when: backup.rclone.enable+ when: backup_rclone_enabled - name: "Wait for 5 seconds before uploading" ansible.builtin.wait_for:@@ -412,14 +435,14 @@ environment: "{{ backup2_rclone_env }}" become: true become_user: "{{ user.name }}"- when: backup.rclone.enable+ when: backup_rclone_enabled - name: "Run items asynchronously in batches of {{ backup2_async_batch_size }} items" ansible.builtin.include_tasks: upload.yml vars: folders: "{{ item }}" loop: "{{ opt_folders | batch(backup2_async_batch_size | int) | list }}"- when: backup.rclone.enable+ when: backup_rclone_enabled - name: Cleanup sync cache # noqa args[module] ansible.builtin.async_status:@@ -436,7 +459,7 @@ name: notify vars: message: "{{ backup_instance }} Backup: Rclone uploaded backup to '{{ backup.rclone.destination }}'."- when: backup.rclone.enable and backup_notify_rclone_complete+ when: backup_rclone_enabled and backup_notify_rclone_complete - name: Snapshot | Cleanup Tasks when: use_snapshot and (snapshot_type == 'btrfs')@@ -464,7 +487,7 @@ # Start Docker containers when snapshot is not enabled - name: Start Docker Containers- when: (not use_snapshot)+ when: docker_service_running and (not use_snapshot) block: - name: "Wait for 5 seconds before starting Docker containers" ansible.builtin.wait_for:@@ -524,7 +547,7 @@ - name: "Start 'cloudplow' service" ansible.builtin.systemd_service:- name: cloudplow+ name: "{{ cloudplow_service_name }}" state: started 
when: (cloudplow_service is defined) and (cloudplow_service.stat.exists) and (cloudplow_service_running) @@ -561,7 +584,7 @@ when: (snapshot_deletion is failed) - name: Start Docker Containers- when: (not use_snapshot)+ when: docker_service_running and (not use_snapshot) block: - name: "Wait for 5 seconds before starting Docker containers" ansible.builtin.wait_for:@@ -578,7 +601,7 @@ - name: "Start 'cloudplow' service" ansible.builtin.systemd_service:- name: cloudplow+ name: "{{ cloudplow_service_name }}" state: started when: (cloudplow_service is defined) and cloudplow_service.stat.exists and cloudplow_service_running @@ -602,7 +625,7 @@ ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} '/home/{{ user.name }}/logs/'" - name: Backup Cleanup Block- when: backup.rclone.enable and backup2_cleanup_enabled and backup2_success+ when: backup_rclone_enabled and backup2_cleanup_enabled and backup2_success block: - name: Determine number of existing backups ansible.builtin.shell: >
modified
roles/backup2/tasks/restore_service.yml
@@ -10,7 +10,7 @@ - name: Restore Service | Install 'curl' ansible.builtin.apt: name: curl state: present - name: Restore Service | Set variables ansible.builtin.set_fact:@@ -123,7 +123,7 @@ {{ files_upload.results | selectattr('stdout', 'search', 'too large') | map(attribute='item') | map('regex_replace', '^/tmp/restore_service/|.enc$', '') | list | sort(case_sensitive=False) | join(', ') }} - - name: Restore Service | Print error mesage when config file(s) were too large to upload+ - name: Restore Service | Print error message when config file(s) were too large to upload ansible.builtin.debug: msg: "The following encrypted config file(s) were too large to upload to the Saltbox Restore Service: '{{ files_too_large_to_upload_list | trim }}'" when: (files_too_large_to_upload_list | trim | length > 0)
modified
roles/backup2/tasks/sanity_check.yml
@@ -15,7 +15,7 @@ # Age in hours - name: "Get age of 'backup.lock' file" ansible.builtin.set_fact:- backup_lock_age: "{{ ((ansible_date_time.epoch | float - backup_lock.stat.mtime) / 3600) | int }}"+ backup_lock_age: "{{ ((ansible_facts['date_time']['epoch'] | float - backup_lock.stat.mtime) / 3600) | int }}" when: backup_lock.stat.exists # Delete if older than 2 hours.
modified
roles/backup2/tasks/snapshot.yml
@@ -10,22 +10,22 @@ - name: "Snapshot | Determine '/' filesystem type" ansible.builtin.set_fact: root_fstype: "{{ item.fstype }}"- when: backup.misc.snapshot and (item.mount == '/')+ when: backup_snapshot_enabled and (item.mount == '/') with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" -- name: "Snapshot | Determine '/opt' filesystem type"+- name: "Snapshot | Determine '{{ server_appdata_path }}' filesystem type" ansible.builtin.set_fact: opt_fstype: "{{ item.fstype }}"- when: backup.misc.snapshot and (item.mount == '/opt')+ when: backup_snapshot_enabled and (item.mount == server_appdata_path) with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" # BTRFS - name: Snapshot | BTRFS specific tasks when:- - backup.misc.snapshot+ - backup_snapshot_enabled - ((root_fstype is defined) and (root_fstype == 'btrfs')) or ((opt_fstype is defined) and (opt_fstype == 'btrfs')) block: - name: "Snapshot | Set general BTRFS variables"@@ -37,14 +37,14 @@ ansible.builtin.set_fact: backup_snapshot_source_path: "/" backup_snapshot_destination_path: "/btrfs/snapshots/root"- backup_opt_path: "/btrfs/snapshots/root/opt/"+ backup_opt_path: "/btrfs/snapshots/root{{ server_appdata_path }}/" when: (root_fstype is defined) and (root_fstype == 'btrfs') - - name: "Snapshot | Set '/opt' BTRFS variables"+ - name: "Snapshot | Set '{{ server_appdata_path }}' BTRFS variables" ansible.builtin.set_fact:- backup_snapshot_source_path: "/opt"- backup_snapshot_destination_path: "/opt/snapshots/opt"- backup_opt_path: "/opt/snapshots/opt/"+ backup_snapshot_source_path: "{{ server_appdata_path }}"+ backup_snapshot_destination_path: "{{ server_appdata_path }}/snapshots/opt"+ backup_opt_path: "{{ server_appdata_path }}/snapshots/opt/" when: (opt_fstype is defined) and (opt_fstype == 'btrfs') - name: Snapshot | Check if BTRFS snapshot is mounted
modified
roles/backup2/tasks/upload.yml
@@ -11,7 +11,7 @@ {{ "--user-agent='" + backup2_user_agent + "'" if (backup2_user_agent | length > 0) else "" }} \ {{ lookup('vars', 'backup2_' + backup.rclone.template + '_template', default='') }} \ --stats=30s \- {{ '--bind=' + ansible_default_ipv4.address if mounts.ipv4_only else '' }} \+ {{ '--bind=' + ansible_facts['default_ipv4']['address'] if rclone_mounts_ipv4_only else '' }} \ --bwlimit={{ backup2_rclone_upload_speed_limit }} \ -vv \ --log-file='{{ playbook_dir }}/backup_rclone.log' \
modified
roles/bazarr/defaults/main.yml
@@ -17,158 +17,115 @@ # Paths ################################ -bazarr_paths_folder: "{{ bazarr_name }}"-bazarr_paths_location: "{{ server_appdata_path }}/{{ bazarr_paths_folder }}"-bazarr_paths_folders_list:- - "{{ bazarr_paths_location }}"+bazarr_role_paths_folder: "{{ bazarr_name }}"+bazarr_role_paths_location: "{{ server_appdata_path }}/{{ bazarr_role_paths_folder }}"+bazarr_role_paths_folders_list:+ - "{{ bazarr_role_paths_location }}" ################################ # Web ################################ -bazarr_web_subdomain: "{{ bazarr_name }}"-bazarr_web_domain: "{{ user.domain }}"-bazarr_web_port: "6767"-bazarr_web_url: "{{ 'https://' + (lookup('vars', bazarr_name + '_web_subdomain', default=bazarr_web_subdomain) + '.' + lookup('vars', bazarr_name + '_web_domain', default=bazarr_web_domain)- if (lookup('vars', bazarr_name + '_web_subdomain', default=bazarr_web_subdomain) | length > 0)- else lookup('vars', bazarr_name + '_web_domain', default=bazarr_web_domain)) }}"+bazarr_role_web_subdomain: "{{ bazarr_name }}"+bazarr_role_web_domain: "{{ user.domain }}"+bazarr_role_web_port: "6767"+bazarr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='bazarr') + '.' 
+ lookup('role_var', '_web_domain', role='bazarr')+ if (lookup('role_var', '_web_subdomain', role='bazarr') | length > 0)+ else lookup('role_var', '_web_domain', role='bazarr')) }}" ################################ # DNS ################################ -bazarr_dns_record: "{{ lookup('vars', bazarr_name + '_web_subdomain', default=bazarr_web_subdomain) }}"-bazarr_dns_zone: "{{ lookup('vars', bazarr_name + '_web_domain', default=bazarr_web_domain) }}"-bazarr_dns_proxy: "{{ dns.proxied }}"+bazarr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='bazarr') }}"+bazarr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='bazarr') }}"+bazarr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -bazarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-bazarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', bazarr_name + '_name', default=bazarr_name)- if (bazarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-bazarr_traefik_middleware_custom: ""-bazarr_traefik_certresolver: "{{ traefik_default_certresolver }}"-bazarr_traefik_enabled: true-bazarr_traefik_api_enabled: true-bazarr_traefik_api_endpoint: "PathPrefix(`/api`)"+bazarr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+bazarr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + bazarr_name+ if (lookup('role_var', '_themepark_enabled', role='bazarr') and global_themepark_plugin_enabled)+ else '') }}"+bazarr_role_traefik_middleware_custom: ""+bazarr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+bazarr_role_traefik_enabled: true+bazarr_role_traefik_api_enabled: true+bazarr_role_traefik_api_endpoint: "PathPrefix(`/api`)" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-bazarr_themepark_enabled: 
false-bazarr_themepark_app: "bazarr"-bazarr_themepark_theme: "{{ global_themepark_theme }}"-bazarr_themepark_domain: "{{ global_themepark_domain }}"-bazarr_themepark_addons: []+bazarr_role_themepark_enabled: false+bazarr_role_themepark_app: "bazarr"+bazarr_role_themepark_theme: "{{ global_themepark_theme }}"+bazarr_role_themepark_domain: "{{ global_themepark_domain }}"+bazarr_role_themepark_addons: [] ################################ # Docker ################################ # Container-bazarr_docker_container: "{{ bazarr_name }}"+bazarr_role_docker_container: "{{ bazarr_name }}" # Image-bazarr_docker_image_pull: true-bazarr_docker_image_repo: "ghcr.io/hotio/bazarr"-bazarr_docker_image_tag: "latest"-bazarr_docker_image: "{{ lookup('vars', bazarr_name + '_docker_image_repo', default=bazarr_docker_image_repo)- + ':' + lookup('vars', bazarr_name + '_docker_image_tag', default=bazarr_docker_image_tag) }}"--# Ports-bazarr_docker_ports_defaults: []-bazarr_docker_ports_custom: []-bazarr_docker_ports: "{{ lookup('vars', bazarr_name + '_docker_ports_defaults', default=bazarr_docker_ports_defaults)- + lookup('vars', bazarr_name + '_docker_ports_custom', default=bazarr_docker_ports_custom) }}"+bazarr_role_docker_image_pull: true+bazarr_role_docker_image_repo: "ghcr.io/hotio/bazarr"+bazarr_role_docker_image_tag: "latest"+bazarr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='bazarr') }}:{{ lookup('role_var', '_docker_image_tag', role='bazarr') }}" # Envs-bazarr_docker_envs_default:+bazarr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-bazarr_docker_envs_custom: {}-bazarr_docker_envs: "{{ lookup('vars', bazarr_name + '_docker_envs_default', default=bazarr_docker_envs_default)- | combine(lookup('vars', bazarr_name + '_docker_envs_custom', default=bazarr_docker_envs_custom)) }}"--# Commands-bazarr_docker_commands_default: []-bazarr_docker_commands_custom: []-bazarr_docker_commands: "{{ lookup('vars', bazarr_name + 
'_docker_commands_default', default=bazarr_docker_commands_default)- + lookup('vars', bazarr_name + '_docker_commands_custom', default=bazarr_docker_commands_custom) }}"+bazarr_role_docker_envs_custom: {}+bazarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='bazarr')+ | combine(lookup('role_var', '_docker_envs_custom', role='bazarr')) }}" # Volumes-bazarr_docker_volumes_default:- - "{{ bazarr_paths_location }}:/config"- - "/opt/scripts:/scripts"-bazarr_docker_volumes_legacy:+bazarr_role_docker_volumes_default:+ - "{{ bazarr_role_paths_location }}:/config"+ - "{{ server_appdata_path }}/scripts:/scripts"+bazarr_role_docker_volumes_legacy: - "/mnt/unionfs/Media/Movies:/movies" - "/mnt/unionfs/Media/TV:/tv"-bazarr_docker_volumes_custom: []-bazarr_docker_volumes: "{{ lookup('vars', bazarr_name + '_docker_volumes_default', default=bazarr_docker_volumes_default)- + lookup('vars', bazarr_name + '_docker_volumes_custom', default=bazarr_docker_volumes_custom)- + (lookup('vars', bazarr_name + '_docker_volumes_legacy', default=bazarr_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"--# Devices-bazarr_docker_devices_default: []-bazarr_docker_devices_custom: []-bazarr_docker_devices: "{{ lookup('vars', bazarr_name + '_docker_devices_default', default=bazarr_docker_devices_default)- + lookup('vars', bazarr_name + '_docker_devices_custom', default=bazarr_docker_devices_custom) }}"--# Hosts-bazarr_docker_hosts_default: {}-bazarr_docker_hosts_custom: {}-bazarr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', bazarr_name + '_docker_hosts_default', default=bazarr_docker_hosts_default))- | combine(lookup('vars', bazarr_name + '_docker_hosts_custom', default=bazarr_docker_hosts_custom)) }}"+bazarr_role_docker_volumes_custom: []+bazarr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='bazarr')+ + lookup('role_var', '_docker_volumes_custom', role='bazarr')+ + (lookup('role_var', '_docker_volumes_legacy', 
role='bazarr')+ if docker_legacy_volume+ else []) }}" # Labels-bazarr_docker_labels_default: {}-bazarr_docker_labels_custom: {}-bazarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', bazarr_name + '_docker_labels_default', default=bazarr_docker_labels_default))- | combine((traefik_themepark_labels- if (bazarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', bazarr_name + '_docker_labels_custom', default=bazarr_docker_labels_custom)) }}"+bazarr_role_docker_labels_default: {}+bazarr_role_docker_labels_custom: {}+bazarr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='bazarr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='bazarr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='bazarr')) }}" # Hostname-bazarr_docker_hostname: "{{ bazarr_name }}"--# Network Mode-bazarr_docker_network_mode_default: "{{ docker_networks_name_common }}"-bazarr_docker_network_mode: "{{ lookup('vars', bazarr_name + '_docker_network_mode_default', default=bazarr_docker_network_mode_default) }}"+bazarr_role_docker_hostname: "{{ bazarr_name }}" # Networks-bazarr_docker_networks_alias: "{{ bazarr_name }}"-bazarr_docker_networks_default: []-bazarr_docker_networks_custom: []-bazarr_docker_networks: "{{ docker_networks_common- + lookup('vars', bazarr_name + '_docker_networks_default', default=bazarr_docker_networks_default)- + lookup('vars', bazarr_name + '_docker_networks_custom', default=bazarr_docker_networks_custom) }}"--# Capabilities-bazarr_docker_capabilities_default: []-bazarr_docker_capabilities_custom: []-bazarr_docker_capabilities: "{{ lookup('vars', bazarr_name + '_docker_capabilities_default', default=bazarr_docker_capabilities_default)- + lookup('vars', bazarr_name + '_docker_capabilities_custom', default=bazarr_docker_capabilities_custom) }}"--# Security Opts-bazarr_docker_security_opts_default: 
[]-bazarr_docker_security_opts_custom: []-bazarr_docker_security_opts: "{{ lookup('vars', bazarr_name + '_docker_security_opts_default', default=bazarr_docker_security_opts_default)- + lookup('vars', bazarr_name + '_docker_security_opts_custom', default=bazarr_docker_security_opts_custom) }}"+bazarr_role_docker_networks_alias: "{{ bazarr_name }}"+bazarr_role_docker_networks_default: []+bazarr_role_docker_networks_custom: []+bazarr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='bazarr')+ + lookup('role_var', '_docker_networks_custom', role='bazarr') }}" # Restart Policy-bazarr_docker_restart_policy: unless-stopped+bazarr_role_docker_restart_policy: unless-stopped # State-bazarr_docker_state: started+bazarr_role_docker_state: started
modified
roles/bazarr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/btop/tasks/main.yml
@@ -50,7 +50,7 @@ ansible.builtin.shell: "bash /tmp/btop/btop/install.sh" args: chdir: "/tmp/btop/btop"- become: yes+ become: true become_user: "{{ user.name }}" - name: "btop | Remove temp folder"
modified
roles/btrfsmaintenance/tasks/main.yml
@@ -12,14 +12,14 @@ root_fstype: "{{ item.fstype }}" when: (item.mount == '/') with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" -- name: "Determine '/opt' filesystem type"+- name: "Determine '{{ server_appdata_path }}' filesystem type" ansible.builtin.set_fact: opt_fstype: "{{ item.fstype }}"- when: (item.mount == '/opt')+ when: (item.mount == server_appdata_path) with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" - name: "Setup btrfsmaintenance scripts" when: ((root_fstype is defined) and (root_fstype == 'btrfs')) or ((opt_fstype is defined) and (opt_fstype == 'btrfs'))@@ -27,7 +27,7 @@ - name: "Install apt package" ansible.builtin.apt: name: "btrfsmaintenance"- state: present+ state: latest - name: "Change mountpoints to 'auto'" ansible.builtin.lineinfile:@@ -55,5 +55,5 @@ # Check for no btrfs and exit - not working - name: "Exit if no btrfs partitions found" ansible.builtin.debug:- msg: "No btrfs partition found on / or /opt - exiting"+ msg: "No btrfs partition found on / or {{ server_appdata_path }} - exiting" when: ((root_fstype is defined) and (root_fstype != 'btrfs')) and ((opt_fstype is defined) and (opt_fstype != 'btrfs'))
modified
roles/cadvisor/defaults/main.yml
@@ -16,123 +16,80 @@ # Web ################################ -cadvisor_web_subdomain: "{{ cadvisor_name }}"-cadvisor_web_domain: "{{ user.domain }}"-cadvisor_web_port: "8080"-cadvisor_web_url: "{{ 'https://' + (lookup('vars', cadvisor_name + '_web_subdomain', default=cadvisor_web_subdomain) + '.' + lookup('vars', cadvisor_name + '_web_domain', default=cadvisor_web_domain)- if (lookup('vars', cadvisor_name + '_web_subdomain', default=cadvisor_web_subdomain) | length > 0)- else lookup('vars', cadvisor_name + '_web_domain', default=cadvisor_web_domain)) }}"+cadvisor_role_web_subdomain: "{{ cadvisor_name }}"+cadvisor_role_web_domain: "{{ user.domain }}"+cadvisor_role_web_port: "8080"+cadvisor_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='cadvisor') + '.' + lookup('role_var', '_web_domain', role='cadvisor')+ if (lookup('role_var', '_web_subdomain', role='cadvisor') | length > 0)+ else lookup('role_var', '_web_domain', role='cadvisor')) }}" ################################ # DNS ################################ -cadvisor_dns_record: "{{ lookup('vars', cadvisor_name + '_web_subdomain', default=cadvisor_web_subdomain) }}"-cadvisor_dns_zone: "{{ lookup('vars', cadvisor_name + '_web_domain', default=cadvisor_web_domain) }}"-cadvisor_dns_proxy: "{{ dns.proxied }}"+cadvisor_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='cadvisor') }}"+cadvisor_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='cadvisor') }}"+cadvisor_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -cadvisor_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-cadvisor_traefik_middleware_default: "{{ traefik_default_middleware }}"-cadvisor_traefik_middleware_custom: ""-cadvisor_traefik_certresolver: "{{ traefik_default_certresolver }}"-cadvisor_traefik_enabled: true-cadvisor_traefik_api_enabled: false-cadvisor_traefik_api_endpoint: ""+cadvisor_role_traefik_sso_middleware: "{{ 
traefik_default_sso_middleware }}"+cadvisor_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+cadvisor_role_traefik_middleware_custom: ""+cadvisor_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+cadvisor_role_traefik_enabled: true+cadvisor_role_traefik_api_enabled: false+cadvisor_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-cadvisor_docker_container: "{{ cadvisor_name }}"+cadvisor_role_docker_container: "{{ cadvisor_name }}" # Image-cadvisor_docker_image_pull: true-cadvisor_docker_image_tag: "latest"-cadvisor_docker_image: "gcr.io/cadvisor/cadvisor:{{ cadvisor_docker_image_tag }}"--# Ports-cadvisor_docker_ports_defaults: []-cadvisor_docker_ports_custom: []-cadvisor_docker_ports: "{{ cadvisor_docker_ports_defaults- + cadvisor_docker_ports_custom }}"+cadvisor_role_docker_image_pull: true+cadvisor_role_docker_image_repo: "gcr.io/cadvisor/cadvisor"+cadvisor_role_docker_image_tag: "latest"+cadvisor_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='cadvisor') }}:{{ lookup('role_var', '_docker_image_tag', role='cadvisor') }}" # Envs-cadvisor_docker_envs_default:+cadvisor_role_docker_envs_default: TZ: "{{ tz }}"-cadvisor_docker_envs_custom: {}-cadvisor_docker_envs: "{{ cadvisor_docker_envs_default- | combine(cadvisor_docker_envs_custom) }}"--# Commands-cadvisor_docker_commands_default: []-cadvisor_docker_commands_custom: []-cadvisor_docker_commands: "{{ cadvisor_docker_commands_default- + cadvisor_docker_commands_custom }}"+cadvisor_role_docker_envs_custom: {}+cadvisor_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='cadvisor')+ | combine(lookup('role_var', '_docker_envs_custom', role='cadvisor')) }}" # Volumes-cadvisor_docker_volumes_default:+cadvisor_role_docker_volumes_default: - "/:/rootfs:ro" - "/var/run:/var/run:ro" - "/sys:/sys:ro" - "/var/lib/docker/:/var/lib/docker:ro" - 
"/dev/disk/:/dev/disk:ro"-cadvisor_docker_volumes_custom: []-cadvisor_docker_volumes: "{{ cadvisor_docker_volumes_default- + cadvisor_docker_volumes_custom }}"--# Devices-cadvisor_docker_devices_default: []-cadvisor_docker_devices_custom: []-cadvisor_docker_devices: "{{ cadvisor_docker_devices_default- + cadvisor_docker_devices_custom }}"--# Hosts-cadvisor_docker_hosts_default: {}-cadvisor_docker_hosts_custom: {}-cadvisor_docker_hosts: "{{ docker_hosts_common- | combine(cadvisor_docker_hosts_default)- | combine(cadvisor_docker_hosts_custom) }}"--# Labels-cadvisor_docker_labels_default: {}-cadvisor_docker_labels_custom: {}-cadvisor_docker_labels: "{{ docker_labels_common- | combine(cadvisor_docker_labels_default)- | combine(cadvisor_docker_labels_custom) }}"+cadvisor_role_docker_volumes_custom: []+cadvisor_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='cadvisor')+ + lookup('role_var', '_docker_volumes_custom', role='cadvisor') }}" # Hostname-cadvisor_docker_hostname: "{{ cadvisor_name }}"+cadvisor_role_docker_hostname: "{{ cadvisor_name }}" # Networks-cadvisor_docker_networks_alias: "{{ cadvisor_name }}"-cadvisor_docker_networks_default: []-cadvisor_docker_networks_custom: []-cadvisor_docker_networks: "{{ docker_networks_common- + cadvisor_docker_networks_default- + cadvisor_docker_networks_custom }}"--# Capabilities-cadvisor_docker_capabilities_default: []-cadvisor_docker_capabilities_custom: []-cadvisor_docker_capabilities: "{{ cadvisor_docker_capabilities_default- + cadvisor_docker_capabilities_custom }}"--# Security Opts-cadvisor_docker_security_opts_default: []-cadvisor_docker_security_opts_custom: []-cadvisor_docker_security_opts: "{{ cadvisor_docker_security_opts_default- + cadvisor_docker_security_opts_custom }}"+cadvisor_role_docker_networks_alias: "{{ cadvisor_name }}"+cadvisor_role_docker_networks_default: []+cadvisor_role_docker_networks_custom: []+cadvisor_role_docker_networks: "{{ docker_networks_common+ + 
lookup('role_var', '_docker_networks_default', role='cadvisor')+ + lookup('role_var', '_docker_networks_custom', role='cadvisor') }}" # Restart Policy-cadvisor_docker_restart_policy: unless-stopped+cadvisor_role_docker_restart_policy: unless-stopped # State-cadvisor_docker_state: started+cadvisor_role_docker_state: started # Privileged-cadvisor_docker_privileged: true+cadvisor_role_docker_privileged: true
modified
roles/cadvisor/tasks/main.yml
@@ -9,9 +9,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/cloudflare/tasks/main.yml
@@ -8,14 +8,14 @@ ######################################################################### --- - name: Variables Task- ansible.builtin.import_tasks: "subtasks/variables.yml"+ ansible.builtin.include_tasks: "subtasks/variables.yml" - name: Saltbox Subdomains Task- ansible.builtin.import_tasks: "subtasks/subdomains.yml"+ ansible.builtin.include_tasks: "subtasks/subdomains.yml" when: (saltbox_type | length > 0) and not skip_dns - name: Purge Cache Task- ansible.builtin.import_tasks: "subtasks/purge_cache.yml"+ ansible.builtin.include_tasks: "subtasks/purge_cache.yml" - name: Configuration Rules Task- ansible.builtin.import_tasks: "subtasks/configuration_rules.yml"+ ansible.builtin.include_tasks: "subtasks/configuration_rules.yml"
modified
roles/cloudflare/tasks/subtasks/configuration_rules.yml
@@ -130,11 +130,11 @@ when: ruleset_exists and not rule_exists - name: Configuration Rules | Remove Configuration Rule Task- ansible.builtin.import_tasks: "configuration_rules/remove_configuration_rule.yml"+ ansible.builtin.include_tasks: "configuration_rules/remove_configuration_rule.yml" when: ruleset_exists and rule_exists # Create new CF Configuration Rule for Lets Encrypt - name: Configuration Rules | Add Configuration Rule Task- ansible.builtin.import_tasks: "configuration_rules/add_configuration_rule.yml"+ ansible.builtin.include_tasks: "configuration_rules/add_configuration_rule.yml" when: ruleset_exists and rule_exists
modified
roles/cloudflare/tasks/subtasks/configuration_rules/add_configuration_rule.yml
@@ -28,5 +28,5 @@ - name: Configuration Rules | Print result of Cloudflare Configuration Rule Creation ansible.builtin.debug:- msg: "Successfully created Let's Encrypt Cloudflare configuration rule for '{{ fld.stdout }}'"+ msg: "Successfully created Let's Encrypt Cloudflare configuration rule for '{{ domain_parsed.fld }}'" when: cf_configuration_rule_creation.json.success is defined and cf_configuration_rule_creation.json.success
modified
roles/cloudflare/tasks/subtasks/purge_cache.yml
@@ -7,7 +7,7 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Purge Cache | Purge Cloudflare Cache for '{{ fld.stdout }}'+- name: Purge Cache | Purge Cloudflare Cache for '{{ domain_parsed.fld }}' ansible.builtin.uri: url: 'https://api.cloudflare.com/client/v4/zones/{{ cloudflare_zone }}/purge_cache' method: 'POST'@@ -23,4 +23,4 @@ ansible.builtin.debug: msg: "{{ ((cf_purge.json.success is defined) and (cf_purge.json.success)) | ternary('Successfully', 'Unsuccessfully') }}- purged Cloudflare cache for '{{ fld.stdout }}'"+ purged Cloudflare cache for '{{ domain_parsed.fld }}'"
modified
roles/cloudflare/tasks/subtasks/subdomains.yml
@@ -8,10 +8,10 @@ ######################################################################### --- ## Add 'saltbox_type' subdomain-- name: "Cloudflare | Subdomains | Add '{{ saltbox_type }}' subdomain to '{{ fld.stdout }}'"- ansible.builtin.import_tasks: "subdomains/add_subdomain.yml"+- name: "Cloudflare | Subdomains | Add '{{ saltbox_type }}' subdomain to '{{ domain_parsed.fld }}'"+ ansible.builtin.include_tasks: "subdomains/add_subdomain.yml" ## Remove 'saltbox' subdomain for Mediabox/Feederbox setups.-- name: "Cloudflare | Subdomains | Remove 'saltbox' subdomain from '{{ fld.stdout }}'"- ansible.builtin.import_tasks: "subdomains/remove_subdomain.yml"+- name: "Cloudflare | Subdomains | Remove 'saltbox' subdomain from '{{ domain_parsed.fld }}'"+ ansible.builtin.include_tasks: "subdomains/remove_subdomain.yml" when: saltbox_type is regex('mediabox|feederbox')
modified
roles/cloudflare/tasks/subtasks/subdomains/add_subdomain.yml
@@ -7,28 +7,28 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: "Subdomains | Add Subdomain | Cloudflare: DNS Record for '{{ saltbox_type }}' set to '{{ ip_address_public }}' was added. Proxy: {{ dns.proxied | default('no') }}"+- name: "Subdomains | Add Subdomain | Cloudflare: DNS Record for '{{ saltbox_type }}' set to '{{ ip_address_public }}' was added. Proxy: {{ dns_proxied | default('no') }}" community.general.cloudflare_dns: account_api_token: "{{ cloudflare.api }}" account_email: "{{ cloudflare.email }}"- zone: "{{ fld.stdout }}"+ zone: "{{ domain_parsed.fld }}" state: present solo: true- proxied: "{{ dns.proxied | default('no') }}"+ proxied: "{{ dns_proxied | default('no') }}" type: A value: "{{ ip_address_public }}" record: "{{ saltbox_type }}"- when: dns.ipv4+ when: dns_ipv4_enabled -- name: "Subdomains | Add Subdomain | Cloudflare: DNS Record for '{{ saltbox_type }}' set to '{{ ipv6_address_public }}' was added. Proxy: {{ dns.proxied | default('no') }}"+- name: "Subdomains | Add Subdomain | Cloudflare: DNS Record for '{{ saltbox_type }}' set to '{{ ipv6_address_public }}' was added. Proxy: {{ dns_proxied | default('no') }}" community.general.cloudflare_dns: account_api_token: "{{ cloudflare.api }}" account_email: "{{ cloudflare.email }}"- zone: "{{ fld.stdout }}"+ zone: "{{ domain_parsed.fld }}" state: present solo: true- proxied: "{{ dns.proxied | default('no') }}"+ proxied: "{{ dns_proxied | default('no') }}" type: AAAA value: "{{ ipv6_address_public }}" record: "{{ saltbox_type }}"- when: dns.ipv6+ when: dns_ipv6_enabled
modified
roles/cloudflare/tasks/subtasks/subdomains/remove_subdomain.yml
@@ -7,22 +7,22 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: "Subdomains | Remove Subdomain | Cloudflare: Remove 'saltbox' A record from '{{ fld.stdout }}'"+- name: "Subdomains | Remove Subdomain | Cloudflare: Remove 'saltbox' A record from '{{ domain_parsed.fld }}'" community.general.cloudflare_dns: account_api_token: "{{ cloudflare.api }}" account_email: "{{ cloudflare.email }}"- zone: "{{ fld.stdout }}"+ zone: "{{ domain_parsed.fld }}" state: absent type: A record: "saltbox"- when: dns.ipv4+ when: dns_ipv4_enabled -- name: "Subdomains | Remove Subdomain | Cloudflare: Remove 'saltbox' AAAA record from '{{ fld.stdout }}'"+- name: "Subdomains | Remove Subdomain | Cloudflare: Remove 'saltbox' AAAA record from '{{ domain_parsed.fld }}'" community.general.cloudflare_dns: account_api_token: "{{ cloudflare.api }}" account_email: "{{ cloudflare.email }}"- zone: "{{ fld.stdout }}"+ zone: "{{ domain_parsed.fld }}" state: absent type: AAAA record: "saltbox"- when: dns.ipv6+ when: dns_ipv6_enabled
modified
roles/cloudflare/tasks/subtasks/variables.yml
@@ -15,13 +15,13 @@ else '' }}" - name: Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ dns_zone | default(user.domain) }}\", as_object=True); print(res.fld)"- register: fld+ tld_parse:+ url: "{{ dns_zone | default(user.domain) }}"+ register: domain_parsed - name: Variables | Fetch Cloudflare Zones ansible.builtin.uri:- url: 'https://api.cloudflare.com/client/v4/zones?name={{ fld.stdout }}'+ url: 'https://api.cloudflare.com/client/v4/zones?name={{ domain_parsed.fld }}' method: 'GET' headers: Content-Type: "application/json"
modified
roles/cloudflare/tasks/subtasks/zone.yml
@@ -8,9 +8,9 @@ ######################################################################### --- - name: Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ user.domain }}\", as_object=True); print(res.fld)"- register: fld+ tld_parse:+ url: "{{ user.domain }}"+ register: domain_parsed - name: Get zones ansible.builtin.uri:@@ -22,7 +22,7 @@ - name: Get zone ID ansible.builtin.set_fact:- zone_id: "{{ get_zones_result.json.result | selectattr('name', 'equalto', fld.stdout) | map(attribute='id') | first }}"+ zone_id: "{{ get_zones_result.json.result | selectattr('name', 'equalto', domain_parsed.fld) | map(attribute='id') | first }}" - name: Get zone records ansible.builtin.uri:
modified
roles/cloudplow/defaults/main.yml
@@ -30,19 +30,13 @@ ################################ cloudplow_folder: "{{ cloudplow_name }}"- cloudplow_path: "{{ server_appdata_path }}/{{ cloudplow_folder }}"- cloudplow_venv_path: "{{ cloudplow_path }}/venv"- cloudplow_python_path: "{{ cloudplow_venv_path }}/bin/python3" cloudplow_python_version: "3.8" cloudplow_script_path: "{{ cloudplow_path }}/cloudplow.py"- cloudplow_requirements_path: "{{ cloudplow_path }}/requirements.txt"- cloudplow_config_path: "{{ cloudplow_path }}/config.json"- cloudplow_alias_path: "/usr/local/bin/{{ cloudplow_name }}" ################################@@ -50,11 +44,8 @@ ################################ cloudplow_git_repo_url: "https://github.com/l3uddz/cloudplow.git"- cloudplow_git_repo_dest: "{{ cloudplow_path }}"- cloudplow_git_repo_branch_primary: "master"- cloudplow_git_repo_branch_secondary: "develop" ################################@@ -69,9 +60,7 @@ ################################ rclone_config_path: "/home/{{ user.name }}/.config/rclone/rclone.conf"- rclone_binary_path: "/usr/bin/rclone"- cloudplow_rclone_google_template: |- "--user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36", "--checkers": 16,@@ -83,7 +72,6 @@ "--retries": 1, "--low-level-retries": 2, "--drive-stop-on-upload-limit": null- cloudplow_rclone_dropbox_template: |- "--user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36", "--checkers": 16,
modified
roles/cloudplow/tasks/main.yml
@@ -11,14 +11,6 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/systemd/delete_service.yml" vars: _service_name: "{{ cloudplow_service_name_old }}"- tags:- - "cloudplow-disable"--- name: Import Disable Task- ansible.builtin.import_tasks: "subtasks/disable.yml"- when: ('cloudplow-disable' in ansible_run_tags)- tags:- - "cloudplow-disable" - name: Reset Cloudplow directory ansible.builtin.file:@@ -53,6 +45,11 @@ - name: Clone project git repo ansible.builtin.include_tasks: "{{ resources_tasks_path }}/git/clone_git_repo.yml"+ vars:+ git_repo_url: "{{ cloudplow_git_repo_url }}"+ git_repo_dest: "{{ cloudplow_git_repo_dest }}"+ git_repo_branch_primary: "{{ cloudplow_git_repo_branch_primary }}"+ git_repo_branch_secondary: "{{ cloudplow_git_repo_branch_secondary }}" - name: "Execute Python role" ansible.builtin.include_role:@@ -108,7 +105,7 @@ force: true - name: Import Settings Task- ansible.builtin.import_tasks: "subtasks/settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" - name: Import '{{ cloudplow_service_name }}.service' ansible.builtin.template:
modified
roles/cloudplow/tasks/subtasks/settings.yml
@@ -18,12 +18,12 @@ - name: Settings | Check if SABnzbd is installed ansible.builtin.stat:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='sabnzbd') }}" register: stat_sabnzbd - name: Settings | Lookup SABnzbd API Key ansible.builtin.set_fact:- sabnzbd_apikey: "{{ lookup('file', sabnzbd_paths_config_location) | regex_search('^api_key *= *.*', multiline=True) | regex_replace('.*= *(.*)$', '\\1') }}"+ sabnzbd_apikey: "{{ lookup('file', lookup('role_var', '_paths_config_location', role='sabnzbd')) | regex_search('^api_key *= *.*', multiline=True) | regex_replace('.*= *(.*)$', '\\1') }}" when: stat_sabnzbd.stat.exists - name: Settings | Check if 'config.json' exists@@ -63,7 +63,7 @@ block: - name: Settings | Install required packages ansible.builtin.apt:- state: present+ state: latest name: - jq - moreutils@@ -76,13 +76,13 @@ - name: Settings | Update NZBGet credentials in 'config.json' ansible.builtin.shell: |- jq '.nzbget.url = "{{ nzbget_web_local_url_web_login }}"' {{ cloudplow_config_path }} | sponge {{ cloudplow_config_path }}+ jq '.nzbget.url = "{{ lookup('role_var', '_web_local_url_web_login', role='nzbget') }}"' {{ cloudplow_config_path }} | sponge {{ cloudplow_config_path }} become: true become_user: "{{ user.name }}" - name: Settings | Update SABnzbd URL in 'config.json' ansible.builtin.shell: |- jq '.sabnzbd.url = "{{ sabnzbd_web_local_url }}"' {{ cloudplow_config_path }} | sponge {{ cloudplow_config_path }}+ jq '.sabnzbd.url = "{{ lookup('role_var', '_web_local_url', role='sabnzbd') }}"' {{ cloudplow_config_path }} | sponge {{ cloudplow_config_path }} become: true become_user: "{{ user.name }}" @@ -95,7 +95,7 @@ - name: Settings | Update Plex URL in 'config.json' ansible.builtin.shell: |- jq '.plex.url = "{{ plex_web_url }}"' {{ cloudplow_config_path }} | sponge {{ cloudplow_config_path }}+ jq '.plex.url = "{{ lookup('role_var', '_web_url', role='plex') }}"' {{ cloudplow_config_path }} | sponge {{ 
cloudplow_config_path }} become: true become_user: "{{ user.name }}"
modified
roles/cloudplow/templates/config.json.j2
@@ -8,7 +8,7 @@ "notifications": {}, "nzbget": { "enabled": false,- "url": "{{ nzbget_web_local_url_web_login }}"+ "url": "{{ nzbget_role_web_local_url_web_login }}" }, "plex": { "enabled": false,@@ -28,7 +28,7 @@ "url": "http://localhost:7949" }, "token": "{{ plex_auth_token | default('') }}",- "url": "{{ plex_web_url }}"+ "url": "{{ plex_role_web_url }}" }, "remotes": { {% for item in cloudplow_remotes %}@@ -57,16 +57,16 @@ }, "remove_empty_dir_depth": 2, "sync_remote": "{{ (item | filter_rclone_remote_with_path)- + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder)- if (item.settings.template != 'nfs')- else ('/mnt/remote/' + (item | filter_rclone_remote_name))- + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder) }}",+ + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder)+ if (item.settings.template != 'nfs')+ else ('/mnt/remote/' + (item | filter_rclone_remote_name))+ + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder) }}", "upload_folder": "{{ item.settings.upload_from }}", "upload_remote": "{{ (item | filter_rclone_remote_with_path)- + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder)- if (item.settings.template != 'nfs')- else ('/mnt/remote/' + (item | filter_rclone_remote_name))- + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder) }}"+ + lookup('vars', 'cloudplow_remote_' + (item | filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder)+ if (item.settings.template != 'nfs')+ else ('/mnt/remote/' + (item | filter_rclone_remote_name))+ + lookup('vars', 'cloudplow_remote_' + (item | 
filter_rclone_remote_name) + '_folder', default=cloudplow_remote_default_folder) }}" {% if loop.index == loop.length %}}{% else %}},{{ '\n' }}{% endif %} {% endfor %} @@ -74,7 +74,7 @@ "sabnzbd": { "apikey": "{{ sabnzbd_apikey | default('') }}", "enabled": false,- "url": "{{ sabnzbd_web_local_url }}"+ "url": "{{ sabnzbd_role_web_local_url }}" }, "syncer": {}, "uploader": {
modified
roles/common/tasks/main.yml
@@ -10,8 +10,8 @@ - name: BTRFS Tasks ansible.builtin.include_tasks: "btrfs.yml" loop:- - /opt- - /mnt/local+ - "{{ server_appdata_path }}"+ - "{{ server_local_folder_path }}" loop_control: loop_var: outer_item @@ -27,11 +27,11 @@ - /home/{{ user.name }}/logs - /home/{{ user.name }}/.config - /home/{{ user.name }}/.config/pip- - /opt- - /opt/saltbox+ - "{{ server_appdata_path }}"+ - "{{ server_appdata_path }}/saltbox" - /mnt- - /mnt/local- - /mnt/local/Media+ - "{{ server_local_folder_path }}"+ - "{{ server_local_folder_path }}/Media" - name: Create common Media directories ansible.builtin.file:@@ -42,9 +42,9 @@ mode: "0775" when: common_create_media_subfolders with_items:- - /mnt/local/Media/Movies- - /mnt/local/Media/Music- - /mnt/local/Media/TV+ - "{{ server_local_folder_path }}/Media/Movies"+ - "{{ server_local_folder_path }}/Media/Music"+ - "{{ server_local_folder_path }}/Media/TV" - name: Check if 'localhost.yml' exists ansible.builtin.stat:@@ -62,7 +62,7 @@ - name: Install required packages ansible.builtin.apt:- state: present+ state: latest name: - apt-transport-https - ca-certificates@@ -70,7 +70,7 @@ - name: Install common packages ansible.builtin.apt:- state: present+ state: latest name: - "nano" - "zip"@@ -102,25 +102,26 @@ - "moreutils" - "unrar" - "python3-virtualenv"+ - "bash-completion" - name: Install extra packages ansible.builtin.apt:- state: present+ state: latest name: - "run-one" - name: Fetch PCI Info ansible.builtin.shell: "lspci -v -s $(lspci | grep -E '.*VGA.*Intel.*' | cut -d' ' -f 1) 2>/dev/null || :" register: lscpi_resp- when: gpu.intel+ when: use_intel - name: Install 'intel-gpu-tools' ansible.builtin.apt:- state: present+ state: latest name: - "intel-gpu-tools" when:- - gpu.intel+ - use_intel - ('i915' in lscpi_resp.stdout) - name: Check to see if 'unrar' installed@@ -131,7 +132,7 @@ - name: Install 'unrar-free' if 'unrar' was not installed ansible.builtin.apt: name: unrar-free- state: present+ state: latest when: (not 
unrar_binary.stat.exists) - name: "Import 'pip.conf'"
modified
roles/crowdsec/tasks/main.yml
@@ -40,7 +40,7 @@ ansible.builtin.dpkg_selections: name: "crowdsec" selection: install- when: ('crowdsec' in ansible_facts.packages)+ when: ('crowdsec' in ansible_facts['packages']) - name: Install crowdsec ansible.builtin.apt:@@ -56,7 +56,7 @@ ansible.builtin.dpkg_selections: name: "crowdsec-firewall-bouncer-iptables" selection: install- when: ('crowdsec-firewall-bouncer-iptables' in ansible_facts.packages)+ when: ('crowdsec-firewall-bouncer-iptables' in ansible_facts['packages']) - name: Manage iptables bouncer install ansible.builtin.apt:
modified
roles/crowdsec/templates/acquis.yaml.j2
@@ -10,7 +10,7 @@ type: syslog --- filenames:- - /opt/traefik/*.log+ - {{ server_appdata_path }}/traefik/*.log labels: type: traefik ---
modified
roles/custom/tasks/main.yml
@@ -18,11 +18,11 @@ - name: Custom | Install custom deb packages ansible.builtin.apt:- state: present+ state: latest deb: "{{ item }}" loop: "{{ custom_deb }}" - name: Install custom pip modules ansible.builtin.shell: "pip install {{ item }}" loop: "{{ custom_pip }}"- when: ansible_distribution_version is version('22.04', '<=')+ when: ansible_facts['distribution_version'] is version('22.04', '<=')
modified
roles/ddclient/tasks/main.yml
@@ -15,7 +15,7 @@ - name: Create ddclient directories ansible.builtin.file:- path: "/opt/ddclient"+ path: "{{ server_appdata_path }}/ddclient" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -45,7 +45,7 @@ - name: Import 'ddclient.conf' ansible.builtin.template: src: "ddclient.conf.j2"- dest: "/opt/ddclient/ddclient.conf"+ dest: "{{ server_appdata_path }}/ddclient/ddclient.conf" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"@@ -61,7 +61,7 @@ PUID: "{{ uid }}" PGID: "{{ gid }}" volumes:- - "/opt/ddclient:/config"+ - "{{ server_appdata_path }}/ddclient:/config" labels: "com.github.saltbox.saltbox_managed": "true" networks:
modified
roles/ddns/defaults/main.yml
@@ -18,116 +18,69 @@ ################################ # Comma separated FQDN's that you want the container to manage-ddns_custom_urls: ""-ddns_delay: "60"+ddns_role_custom_urls: ""+ddns_role_delay: "60" ################################ # Docker ################################ # Container-ddns_docker_container: "{{ ddns_name }}"+ddns_role_docker_container: "{{ ddns_name }}" # Image-ddns_docker_image_pull: true-ddns_docker_image_tag: "latest"-ddns_docker_image: "saltydk/dns:{{ ddns_docker_image_tag }}"--# Ports-ddns_docker_ports_defaults: []-ddns_docker_ports_custom: []-ddns_docker_ports: "{{ ddns_docker_ports_defaults- + ddns_docker_ports_custom }}"+ddns_role_docker_image_pull: true+ddns_role_docker_image_repo: "saltydk/dns"+ddns_role_docker_image_tag: "latest"+ddns_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='ddns') }}:{{ lookup('role_var', '_docker_image_tag', role='ddns') }}" # Envs-ddns_docker_envs_default:+ddns_role_docker_envs_default: TZ: "{{ tz }}" CLOUDFLARE_API_KEY: "{{ cloudflare.api }}" CLOUDFLARE_EMAIL: "{{ cloudflare.email }}"- CLOUDFLARE_PROXY_DEFAULT: "{{ dns.proxied | string }}"+ CLOUDFLARE_PROXY_DEFAULT: "{{ dns_proxied | string }}" TRAEFIK_API_URL: "http://traefik:8080" TRAEFIK_ENTRYPOINTS: "websecure,web"- CUSTOM_URLS: "{{ ddns_custom_urls if (ddns_custom_urls | length > 0) else omit }}"- IP_VERSION: "{{ 'both' if (dns.ipv4 and dns.ipv6) else ('4' if dns.ipv4 else '6') }}"- DELAY: "{{ ddns_delay }}"-ddns_docker_envs_custom: {}-ddns_docker_envs: "{{ ddns_docker_envs_default- | combine(ddns_docker_envs_custom) }}"--# Commands-ddns_docker_volumes_global: false-ddns_docker_commands_default: []-ddns_docker_commands_custom: []-ddns_docker_commands: "{{ ddns_docker_commands_default- + ddns_docker_commands_custom }}"+ CUSTOM_URLS: "{{ lookup('role_var', '_custom_urls', role='ddns') if (lookup('role_var', '_custom_urls', role='ddns') | length > 0) else omit }}"+ IP_VERSION: "{{ 'both' if (dns_ipv4_enabled and dns_ipv6_enabled) 
else ('4' if dns_ipv4_enabled else '6') }}"+ DELAY: "{{ lookup('role_var', '_delay', role='ddns') }}"+ddns_role_docker_envs_custom: {}+ddns_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='ddns')+ | combine(lookup('role_var', '_docker_envs_custom', role='ddns')) }}" # Volumes-ddns_docker_volumes_default: []-ddns_docker_volumes_custom: []-ddns_docker_volumes: "{{ ddns_docker_volumes_default- + ddns_docker_volumes_custom }}"+ddns_role_docker_volumes_global: false # Mounts-ddns_docker_mounts_default:+ddns_role_docker_mounts_default: - target: /tmp type: tmpfs-ddns_docker_mounts_custom: []-ddns_docker_mounts: "{{ lookup('vars', ddns_name + '_docker_mounts_default', default=ddns_docker_mounts_default)- + lookup('vars', ddns_name + '_docker_mounts_custom', default=ddns_docker_mounts_custom) }}"--# Devices-ddns_docker_devices_default: []-ddns_docker_devices_custom: []-ddns_docker_devices: "{{ ddns_docker_devices_default- + ddns_docker_devices_custom }}"--# Hosts-ddns_docker_hosts_default: {}-ddns_docker_hosts_custom: {}-ddns_docker_hosts: "{{ docker_hosts_common- | combine(ddns_docker_hosts_default)- | combine(ddns_docker_hosts_custom) }}"--# Labels-ddns_docker_labels_default: {}-ddns_docker_labels_custom: {}-ddns_docker_labels: "{{ docker_labels_common- | combine(ddns_docker_labels_default)- | combine(ddns_docker_labels_custom) }}"+ddns_role_docker_mounts_custom: []+ddns_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='ddns')+ + lookup('role_var', '_docker_mounts_custom', role='ddns') }}" # Hostname-ddns_docker_hostname: "{{ ddns_name }}"+ddns_role_docker_hostname: "{{ ddns_name }}" # Networks-ddns_docker_networks_alias: "{{ ddns_name }}"-ddns_docker_networks_default: []-ddns_docker_networks_custom: []-ddns_docker_networks: "{{ docker_networks_common- + ddns_docker_networks_default- + ddns_docker_networks_custom }}"--# Capabilities-ddns_docker_capabilities_default: []-ddns_docker_capabilities_custom: 
[]-ddns_docker_capabilities: "{{ ddns_docker_capabilities_default- + ddns_docker_capabilities_custom }}"--# Security Opts-ddns_docker_security_opts_default: []-ddns_docker_security_opts_custom: []-ddns_docker_security_opts: "{{ ddns_docker_security_opts_default- + ddns_docker_security_opts_custom }}"+ddns_role_docker_networks_alias: "{{ ddns_name }}"+ddns_role_docker_networks_default: []+ddns_role_docker_networks_custom: []+ddns_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='ddns')+ + lookup('role_var', '_docker_networks_custom', role='ddns') }}" # Restart Policy-ddns_docker_restart_policy: unless-stopped+ddns_role_docker_restart_policy: unless-stopped # State-ddns_docker_state: started+ddns_role_docker_state: started # Init-ddns_docker_init: true+ddns_role_docker_init: true # Dependencies-ddns_depends_on: "traefik"-ddns_depends_on_delay: "10"-ddns_depends_on_healthchecks: "false"+ddns_role_depends_on: "traefik"+ddns_role_depends_on_delay: "10"+ddns_role_depends_on_healthchecks: "false"
modified
roles/ddns/tasks/main.yml
@@ -12,7 +12,7 @@ msg: - "IPv4 or IPv6 DNS management has to be enabled." - "Check your 'adv_settings.yml' config."- when: not (dns.ipv4 or dns.ipv6)+ when: not (dns_ipv4_enabled or dns_ipv6_enabled) - name: "Fail if not using Cloudflare" ansible.builtin.fail:
modified
roles/deluge/defaults/main.yml
@@ -17,168 +17,131 @@ # Paths ################################ -deluge_paths_folder: "{{ deluge_name }}"-deluge_paths_location: "{{ server_appdata_path }}/{{ deluge_paths_folder }}"-deluge_paths_conf: "{{ deluge_paths_location }}/core.conf"-deluge_paths_downloads_location: "{{ downloads_torrents_path }}/{{ deluge_paths_folder }}"-deluge_paths_folders_list:- - "{{ deluge_paths_location }}"- - "{{ deluge_paths_downloads_location }}"- - "{{ deluge_paths_downloads_location }}/completed"- - "{{ deluge_paths_downloads_location }}/incoming"- - "{{ deluge_paths_downloads_location }}/watched"- - "{{ deluge_paths_downloads_location }}/torrents"+deluge_role_paths_folder: "{{ deluge_name }}"+deluge_role_paths_location: "{{ server_appdata_path }}/{{ deluge_role_paths_folder }}"+deluge_role_paths_conf: "{{ deluge_role_paths_location }}/core.conf"+deluge_role_paths_downloads_location: "{{ downloads_torrents_path }}/{{ deluge_role_paths_folder }}"+deluge_role_paths_folders_list:+ - "{{ deluge_role_paths_location }}"+ - "{{ deluge_role_paths_downloads_location }}"+ - "{{ deluge_role_paths_downloads_location }}/completed"+ - "{{ deluge_role_paths_downloads_location }}/incoming"+ - "{{ deluge_role_paths_downloads_location }}/watched"+ - "{{ deluge_role_paths_downloads_location }}/torrents" ################################ # Web ################################ -deluge_web_subdomain: "{{ deluge_name }}"-deluge_web_domain: "{{ user.domain }}"-deluge_web_port: "8112"-deluge_web_url: "{{ 'https://' + (deluge_web_subdomain + '.' + deluge_web_domain- if (deluge_web_subdomain | length > 0)- else deluge_web_domain) }}"+deluge_role_web_subdomain: "{{ deluge_name }}"+deluge_role_web_domain: "{{ user.domain }}"+deluge_role_web_port: "8112"+deluge_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='deluge') + '.' 
+ lookup('role_var', '_web_domain', role='deluge')+ if (lookup('role_var', '_web_subdomain', role='deluge') | length > 0)+ else lookup('role_var', '_web_domain', role='deluge')) }}" ################################ # DNS ################################ -deluge_dns_record: "{{ lookup('vars', deluge_name + '_web_subdomain', default=deluge_web_subdomain) }}"-deluge_dns_zone: "{{ lookup('vars', deluge_name + '_web_domain', default=deluge_web_domain) }}"-deluge_dns_proxy: "{{ dns.proxied }}"+deluge_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='deluge') }}"+deluge_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='deluge') }}"+deluge_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -deluge_traefik_sso_middleware: ""-deluge_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', deluge_name + '_name', default=deluge_name)- if (deluge_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-deluge_traefik_middleware_custom: ""-deluge_traefik_certresolver: "{{ traefik_default_certresolver }}"-deluge_traefik_enabled: true-deluge_traefik_api_enabled: false-deluge_traefik_api_endpoint: ""+deluge_role_traefik_sso_middleware: ""+deluge_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + deluge_name+ if (lookup('role_var', '_themepark_enabled', role='deluge') and global_themepark_plugin_enabled)+ else '') }}"+deluge_role_traefik_middleware_custom: ""+deluge_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+deluge_role_traefik_enabled: true+deluge_role_traefik_api_enabled: false+deluge_role_traefik_api_endpoint: "" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-deluge_themepark_enabled: false-deluge_themepark_app: "deluge"-deluge_themepark_theme: "{{ global_themepark_theme 
}}"-deluge_themepark_domain: "{{ global_themepark_domain }}"-deluge_themepark_addons: []+deluge_role_themepark_enabled: false+deluge_role_themepark_app: "deluge"+deluge_role_themepark_theme: "{{ global_themepark_theme }}"+deluge_role_themepark_domain: "{{ global_themepark_domain }}"+deluge_role_themepark_addons: [] ################################ # Docker ################################ # Container-deluge_docker_container: "{{ deluge_name }}"+deluge_role_docker_container: "{{ deluge_name }}" # Image-deluge_docker_image_pull: true-deluge_docker_image_repo: "lscr.io/linuxserver/deluge"-deluge_docker_image_tag: "latest"-deluge_docker_image: "{{ lookup('vars', deluge_name + '_docker_image_repo', default=deluge_docker_image_repo)- + ':' + lookup('vars', deluge_name + '_docker_image_tag', default=deluge_docker_image_tag) }}"+deluge_role_docker_image_pull: true+deluge_role_docker_image_repo: "lscr.io/linuxserver/deluge"+deluge_role_docker_image_tag: "latest"+deluge_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='deluge') }}:{{ lookup('role_var', '_docker_image_tag', role='deluge') }}" # Ports-deluge_docker_ports_58112: "{{ port_lookup_58112.meta.port- if (port_lookup_58112.meta.port is defined) and (port_lookup_58112.meta.port | trim | length > 0)- else '58112' }}"+deluge_role_docker_ports_58112: "{{ port_lookup_58112.meta.port+ if (port_lookup_58112.meta.port is defined) and (port_lookup_58112.meta.port | trim | length > 0)+ else '58112' }}" -deluge_docker_ports_defaults:- - "{{ deluge_docker_ports_58112 }}:{{ deluge_docker_ports_58112 }}"- - "{{ deluge_docker_ports_58112 }}:{{ deluge_docker_ports_58112 }}/udp"-deluge_docker_ports_custom: []-deluge_docker_ports: "{{ deluge_docker_ports_defaults- + deluge_docker_ports_custom }}"+deluge_role_docker_ports_default:+ - "{{ deluge_role_docker_ports_58112 }}:{{ deluge_role_docker_ports_58112 }}"+ - "{{ deluge_role_docker_ports_58112 }}:{{ deluge_role_docker_ports_58112 
}}/udp"+deluge_role_docker_ports_custom: []+deluge_role_docker_ports: "{{ lookup('role_var', '_docker_ports_default', role='deluge')+ + lookup('role_var', '_docker_ports_custom', role='deluge') }}" # Envs-deluge_docker_envs_default:+deluge_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}" UMASK: "002"-deluge_docker_envs_custom: {}-deluge_docker_envs: "{{ lookup('vars', deluge_name + '_docker_envs_default', default=deluge_docker_envs_default)- | combine(lookup('vars', deluge_name + '_docker_envs_custom', default=deluge_docker_envs_custom)) }}"--# Commands-deluge_docker_commands_default: []-deluge_docker_commands_custom: []-deluge_docker_commands: "{{ lookup('vars', deluge_name + '_docker_commands_default', default=deluge_docker_commands_default)- + lookup('vars', deluge_name + '_docker_commands_custom', default=deluge_docker_commands_custom) }}"+deluge_role_docker_envs_custom: {}+deluge_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='deluge')+ | combine(lookup('role_var', '_docker_envs_custom', role='deluge')) }}" # Volumes-deluge_docker_volumes_default:- - "{{ deluge_paths_location }}:/config"+deluge_role_docker_volumes_default:+ - "{{ deluge_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-deluge_docker_volumes_custom: []-deluge_docker_volumes: "{{ lookup('vars', deluge_name + '_docker_volumes_default', default=deluge_docker_volumes_default)- + lookup('vars', deluge_name + '_docker_volumes_custom', default=deluge_docker_volumes_custom) }}"--# Devices-deluge_docker_devices_default: []-deluge_docker_devices_custom: []-deluge_docker_devices: "{{ lookup('vars', deluge_name + '_docker_devices_default', default=deluge_docker_devices_default)- + lookup('vars', deluge_name + '_docker_devices_custom', default=deluge_docker_devices_custom) }}"--# Hosts-deluge_docker_hosts_default: {}-deluge_docker_hosts_custom: {}-deluge_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', deluge_name 
+ '_docker_hosts_default', default=deluge_docker_hosts_default))- | combine(lookup('vars', deluge_name + '_docker_hosts_custom', default=deluge_docker_hosts_custom)) }}"+deluge_role_docker_volumes_custom: []+deluge_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='deluge')+ + lookup('role_var', '_docker_volumes_custom', role='deluge') }}" # Labels-deluge_docker_labels_default: {}-deluge_docker_labels_custom: {}-deluge_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', deluge_name + '_docker_labels_default', default=deluge_docker_labels_default))- | combine((traefik_themepark_labels- if (deluge_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', deluge_name + '_docker_labels_custom', default=deluge_docker_labels_custom)) }}"+deluge_role_docker_labels_default: {}+deluge_role_docker_labels_custom: {}+deluge_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='deluge')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='deluge') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='deluge')) }}" # Hostname-deluge_docker_hostname: "{{ deluge_name }}"--# Network Mode-deluge_docker_network_mode_default: "{{ docker_networks_name_common }}"-deluge_docker_network_mode: "{{ lookup('vars', deluge_name + '_docker_network_mode_default', default=deluge_docker_network_mode_default) }}"+deluge_role_docker_hostname: "{{ deluge_name }}" # Networks-deluge_docker_networks_alias: "{{ deluge_name }}"-deluge_docker_networks_default: []-deluge_docker_networks_custom: []-deluge_docker_networks: "{{ docker_networks_common- + lookup('vars', deluge_name + '_docker_networks_default', default=deluge_docker_networks_default)- + lookup('vars', deluge_name + '_docker_networks_custom', default=deluge_docker_networks_custom) }}"--# Capabilities-deluge_docker_capabilities_default: []-deluge_docker_capabilities_custom: 
[]-deluge_docker_capabilities: "{{ lookup('vars', deluge_name + '_docker_capabilities_default', default=deluge_docker_capabilities_default)- + lookup('vars', deluge_name + '_docker_capabilities_custom', default=deluge_docker_capabilities_custom) }}"--# Security Opts-deluge_docker_security_opts_default: []-deluge_docker_security_opts_custom: []-deluge_docker_security_opts: "{{ lookup('vars', deluge_name + '_docker_security_opts_default', default=deluge_docker_security_opts_default)- + lookup('vars', deluge_name + '_docker_security_opts_custom', default=deluge_docker_security_opts_custom) }}"+deluge_role_docker_networks_alias: "{{ deluge_name }}"+deluge_role_docker_networks_default: []+deluge_role_docker_networks_custom: []+deluge_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='deluge')+ + lookup('role_var', '_docker_networks_custom', role='deluge') }}" # Restart Policy-deluge_docker_restart_policy: unless-stopped+deluge_role_docker_restart_policy: unless-stopped # Stop Timeout-deluge_docker_stop_timeout: 900+deluge_role_docker_stop_timeout: 900 # State-deluge_docker_state: started+deluge_role_docker_state: started
modified
roles/deluge/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -21,10 +21,10 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Pre-Install Tasks- ansible.builtin.import_tasks: "subtasks/pre-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/pre-install/main.yml" - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml"
modified
roles/deluge/tasks/subtasks/post-install/main.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Wait for config to be created ansible.builtin.wait_for:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" state: present - name: Post-Install | Wait for 10 seconds@@ -20,7 +20,7 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml" - name: Post-Install | Settings Task- ansible.builtin.import_tasks: "settings/main.yml"+ ansible.builtin.include_tasks: "settings/main.yml" - name: Post-Install | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/deluge/tasks/subtasks/post-install/settings/main.yml
@@ -9,25 +9,25 @@ --- - name: Post-Install | Settings | Update 'core.conf' | Set listen ports ansible.builtin.replace:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" regexp: '"listen_ports": (\[[\d|\W]+\]),'- replace: '"listen_ports": [{{ deluge_docker_ports_58112 }},{{ deluge_docker_ports_58112 }}],'+ replace: '"listen_ports": [{{ lookup("role_var", "_docker_ports_58112", role="deluge") }},{{ lookup("role_var", "_docker_ports_58112", role="deluge") }}],' - name: Post-Install | Settings | Update 'core.conf' | Set listen random port to null ansible.builtin.replace:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" regexp: '"listen_random_port": ([\d|\W]+),' replace: '"listen_random_port": null,' - name: Post-Install | Settings | Update 'core.conf' | Disable random incoming port ansible.builtin.replace:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" regexp: '"random_port": ([\w]+),' replace: '"random_port": false,' - name: Post-Install | Settings | Update 'core.conf' | Enable random outgoing ports ansible.builtin.replace:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" regexp: '"random_outgoing_ports": ([\w]+),' replace: '"random_outgoing_ports": true,' @@ -36,7 +36,7 @@ block: - name: Post-Install | Settings | Update 'core.conf' | Disable Network Extras ansible.builtin.replace:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" regexp: '"{{ item }}": ([\w]+){{ "," if item != "utpex" else "" }}' replace: '"{{ item }}": false{{ "," if item != "utpex" else "" }}' loop:
modified
roles/deluge/tasks/subtasks/pre-install/main.yml
@@ -17,5 +17,5 @@ - name: Pre-Install | Check if existing config file exists ansible.builtin.stat:- path: "{{ deluge_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='deluge') }}" register: deluge_paths_conf_stat
modified
roles/diag/defaults/main.yml
@@ -7,17 +7,14 @@ # GNU General Public License v3.0 # ######################################################################### ----diagnose_cloudflare_script_path: "/srv/git/saltbox/roles/diag/files/cloudflare_ssl.py"- diagnose_vars: - "Saltbox repo branch: {{ git_branch.stdout if (git_branch is defined and git_branch.stdout is defined) else 'Failed retrieving' }}" - "Saltbox repo commit: {{ git_version.stdout if (git_version is defined and git_version.stdout is defined) else 'Failed retrieving' }}" - "Saltbox upstream commit: {{ git_origin_version.stdout if (git_origin_version is defined and git_origin_version.stdout is defined) else 'Failed retrieving' }}" - "cloudflare_is_enabled: {{ cloudflare_is_enabled | lower }}"- - "{{ 'Cloudflare venv deployed: ' + (diagnose_cloudflare_venv.stat.exists | string | lower) if (cloudflare_is_enabled and (dns.ipv4 or dns.ipv6)) else '' }}"- - "{{ 'Cloudflare IPv4 automation: ' + (dns.ipv4 | string | lower) if cloudflare_is_enabled else '' }}"- - "{{ 'Cloudflare IPv6 automation: ' + (dns.ipv6 | string | lower) if cloudflare_is_enabled else '' }}"- - "{{ 'Cloudflare SSL Mode: ' + cloudflare_ssl.stdout if (cloudflare_is_enabled and cloudflare_ssl is defined and cloudflare_ssl.stdout is defined) else '' }}"+ - "{{ 'Cloudflare IPv4 automation: ' + (dns_ipv4_enabled | string | lower) if cloudflare_is_enabled else '' }}"+ - "{{ 'Cloudflare IPv6 automation: ' + (dns_ipv6_enabled | string | lower) if cloudflare_is_enabled else '' }}"+ - "{{ 'Cloudflare SSL Mode: ' + cloudflare_ssl.ssl_mode if (cloudflare_is_enabled and cloudflare_ssl is defined and cloudflare_ssl.ssl_mode is defined) else '' }}" - "plex_account_is_enabled: {{ plex_account_is_enabled | lower }}" - "rclone_remote_is_defined: {{ rclone_remote_is_defined | lower }}" - "use_cloudplow: {{ use_cloudplow | lower }}"
modified
roles/diag/tasks/main.yml
@@ -10,36 +10,12 @@ - name: Cloudflare API lookup when: cloudflare_is_enabled block:- - name: Check if Cloudflare venv exists- ansible.builtin.stat:- path: "/srv/cloudflare-helper/venv/bin/python3"- register: diagnose_cloudflare_venv-- - name: Include Cloudflare Setup venv role default vars- ansible.builtin.include_vars: "{{ resources_path }}/roles/dns/defaults/main.yml"- when: not diagnose_cloudflare_venv.stat.exists-- - name: Setup Cloudflare venv- ansible.builtin.include_tasks: "{{ resources_path }}/roles/dns/tasks/cloudflare/subtasks/setup.yml"- when: not diagnose_cloudflare_venv.stat.exists- vars:- cloudflare_files:- - "{{ resources_path }}/roles/dns/files/fetch_cloudflare_records.py"- - "{{ resources_path }}/roles/dns/files/requirements.txt"-- - name: Get FLD- ansible.builtin.shell: |- {{ saltbox_python }} -c "from tld import get_tld; res = get_tld(\"http://{{ user.domain }}\", as_object=True); print(res.fld)"- register: diagnose_fld-- - name: Fetch Record information- ansible.builtin.shell: "/srv/cloudflare-helper/venv/bin/python3 {{ diagnose_cloudflare_script_path }} --auth_key '{{ cloudflare.api }}' --auth_email '{{ cloudflare.email }}' --zone_name '{{ diagnose_fld.stdout }}'"+ - name: Get Cloudflare SSL/TLS mode+ cloudflare_ssl:+ auth_email: "{{ cloudflare.email }}"+ auth_key: "{{ cloudflare.api }}"+ domain: "{{ user.domain }}" register: cloudflare_ssl-- - name: Print Failure Output- ansible.builtin.fail:- msg: "{{ cloudflare_ssl.stderr }}"- when: cloudflare_ssl.rc != 0 - name: Gather mount information ansible.builtin.setup:@@ -49,7 +25,7 @@ - name: Filter mount information ansible.builtin.set_fact: host_mounts: "{{ host_mounts | default({}) | combine({item.mount: item.fstype}) }}"- loop: "{{ ansible_mounts }}"+ loop: "{{ ansible_facts['mounts'] }}" - name: "Diagnose variables" ansible.builtin.debug:
modified
roles/diun/defaults/main.yml
@@ -17,100 +17,63 @@ # Paths ################################ -diun_paths_folder: "{{ diun_name }}"-diun_paths_location: "{{ server_appdata_path }}/{{ diun_paths_folder }}"-diun_paths_folders_list:- - "{{ diun_paths_location }}"- - "{{ diun_paths_location }}/data"+diun_role_paths_folder: "{{ diun_name }}"+diun_role_paths_location: "{{ server_appdata_path }}/{{ diun_role_paths_folder }}"+diun_role_paths_folders_list:+ - "{{ diun_role_paths_location }}"+ - "{{ diun_role_paths_location }}/data" ################################ # Docker ################################ # Container-diun_docker_container: "{{ diun_name }}"+diun_role_docker_container: "{{ diun_name }}" # Image-diun_docker_image_pull: true-diun_docker_image_tag: "latest"-diun_docker_image: "crazymax/diun:{{ diun_docker_image_tag }}"--# Ports-diun_docker_ports_defaults: []-diun_docker_ports_custom: []-diun_docker_ports: "{{ diun_docker_ports_defaults- + diun_docker_ports_custom }}"+diun_role_docker_image_pull: true+diun_role_docker_image_repo: "crazymax/diun"+diun_role_docker_image_tag: "latest"+diun_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='diun') }}:{{ lookup('role_var', '_docker_image_tag', role='diun') }}" # Envs-diun_docker_envs_default:+diun_role_docker_envs_default: TZ: "{{ tz }}" LOG_LEVEL: "info" LOG_JSON: "false"-diun_docker_envs_custom: {}-diun_docker_envs: "{{ diun_docker_envs_default- | combine(diun_docker_envs_custom) }}"--# Commands-diun_docker_commands_default: []-diun_docker_commands_custom: []-diun_docker_commands: "{{ diun_docker_commands_default- + diun_docker_commands_custom }}"+diun_role_docker_envs_custom: {}+diun_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='diun')+ | combine(lookup('role_var', '_docker_envs_custom', role='diun')) }}" # Volumes-diun_docker_volumes_default:- - "{{ diun_paths_location }}/data:/data"- - "{{ diun_paths_location }}/diun.yml:/diun.yml:ro"+diun_role_docker_volumes_default:+ - "{{ diun_role_paths_location 
}}/data:/data"+ - "{{ diun_role_paths_location }}/diun.yml:/diun.yml:ro" - "/var/run/docker.sock:/var/run/docker.sock"-diun_docker_volumes_custom: []-diun_docker_volumes: "{{ diun_docker_volumes_default- + diun_docker_volumes_custom }}"--# Devices-diun_docker_devices_default: []-diun_docker_devices_custom: []-diun_docker_devices: "{{ diun_docker_devices_default- + diun_docker_devices_custom }}"--# Hosts-diun_docker_hosts_default: {}-diun_docker_hosts_custom: {}-diun_docker_hosts: "{{ docker_hosts_common- | combine(diun_docker_hosts_default)- | combine(diun_docker_hosts_custom) }}"+diun_role_docker_volumes_custom: []+diun_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='diun')+ + lookup('role_var', '_docker_volumes_custom', role='diun') }}" # Labels-diun_docker_labels_default:+diun_role_docker_labels_default: diun.enable: "true"-diun_docker_labels_custom: {}-diun_docker_labels: "{{ docker_labels_common- | combine(diun_docker_labels_default)- | combine(diun_docker_labels_custom) }}"+diun_role_docker_labels_custom: {}+diun_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='diun')+ | combine(lookup('role_var', '_docker_labels_custom', role='diun')) }}" # Hostname-diun_docker_hostname: "{{ diun_name }}"+diun_role_docker_hostname: "{{ diun_name }}" # Networks-diun_docker_networks_alias: "{{ diun_name }}"-diun_docker_networks_default: []-diun_docker_networks_custom: []-diun_docker_networks: "{{ docker_networks_common- + diun_docker_networks_default- + diun_docker_networks_custom }}"--# Capabilities-diun_docker_capabilities_default: []-diun_docker_capabilities_custom: []-diun_docker_capabilities: "{{ diun_docker_capabilities_default- + diun_docker_capabilities_custom }}"--# Security Opts-diun_docker_security_opts_default: []-diun_docker_security_opts_custom: []-diun_docker_security_opts: "{{ diun_docker_security_opts_default- + diun_docker_security_opts_custom }}"+diun_role_docker_networks_alias: "{{ diun_name 
}}"+diun_role_docker_networks_default: []+diun_role_docker_networks_custom: []+diun_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='diun')+ + lookup('role_var', '_docker_networks_custom', role='diun') }}" # Restart Policy-diun_docker_restart_policy: unless-stopped+diun_role_docker_restart_policy: unless-stopped # State-diun_docker_state: started+diun_role_docker_state: started
modified
roles/diun/tasks/main.yml
@@ -16,7 +16,7 @@ - name: Import default config ansible.builtin.copy: src: "diun.yml"- dest: "{{ diun_paths_location }}/diun.yml"+ dest: "{{ lookup('role_var', '_paths_location', role='diun') }}/diun.yml" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/docker/defaults/main.yml
@@ -22,9 +22,19 @@ # docker_config_custom: # debug: "true" docker_config_custom: {}++# CPU and Memory defaults+docker_cpus_default: ""+docker_memory_default: ""+ # Skip Container startup during core, saltbox, mediabox or feederbox # If the kernel has been updated and a reboot will happen docker_skip_start_during_meta_tag: "{{ saltbox_auto_reboot }}"++# Toggles pruning of dangling images after container creation.+docker_create_image_prune: true+docker_create_image_prune_delay: true+docker_create_image_prune_delay_timeout: 10 ################################ # Lookup@@ -37,7 +47,6 @@ ################################ docker_apt_key_id: 0EBFCD88- docker_apt_key_url: https://download.docker.com/linux/ubuntu/gpg ################################@@ -45,9 +54,7 @@ ################################ docker_apt_repo_version: stable--docker_apt_repo_url: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_repo_version }}" # noqa line-length-+docker_apt_repo_url: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] | lower }} {{ docker_apt_repo_version }}" # noqa line-length docker_apt_repo_filename: docker ################################@@ -62,79 +69,51 @@ docker_version: "28" # Docker CE- docker_ce_name: "Docker CE"-docker_ce_package: "docker-ce={{ docker_ce_resolved_version | default('') }}"+docker_ce_package: "docker-ce={{ docker_ce_resolved_version }}" docker_ce_filepath: "/usr/bin/dockerd"- docker_ce_dpkg: "docker-ce" # Docker CE CLI- docker_ce_cli_name: "Docker CE CLI"-docker_ce_cli_package: "docker-ce-cli={{ docker_ce_cli_resolved_version | default('') }}"+docker_ce_cli_package: "docker-ce-cli={{ docker_ce_cli_resolved_version }}" docker_ce_cli_filepath: "/usr/bin/docker"- docker_ce_cli_dpkg: "docker-ce-cli" # Containerd- 
containerd_io_name: "Containerd"- containerd_io_package: "containerd.io"- containerd_io_filepath: "/usr/bin/containerd"- containerd_io_dpkg: "containerd.io" # Docker Compose- compose_cli_name: "Docker Compose"- compose_cli_package: "docker-compose-plugin"- compose_cli_filepath: "docker compose"- compose_cli_dpkg: "docker-compose-plugin" # Docker Rootless Extras- docker_rootless_name: "Docker Rootless Extras"- docker_rootless_package: "docker-ce-rootless-extras"- docker_rootless_filepath: "/usr/bin/rootlesskit"- docker_rootless_dpkg: "docker-ce-rootless-extras" # Misc-docker_package_state: "present" put_docker_dpkg_into_hold: true- docker_filesystem_path: "/media/docker-volume.img"- docker_filesystem_size: "20G"--docker_ipv6: "{{ dns.ipv6 | bool }}"+docker_ipv6: "{{ dns_ipv6_enabled }}" # Service- docker_service_after: "{{ mergerfs_service_name }}"- docker_service_sleep: "{{ 0 if continuous_integration or (not use_remote) else 120 }}"- docker_service_force: true- docker_service_check: "{{ docker_binary.stat.exists and (docker_service_running or ((remote_docker_service_running is defined) and remote_docker_service_running) or ((unionfs_docker_service_running is defined) and unionfs_docker_service_running)) }}"- docker_service_check_mounts: "{{ docker_binary.stat.exists and (((remote_docker_service_running is defined) and remote_docker_service_running) or ((unionfs_docker_service_running is defined) and unionfs_docker_service_running)) }}"- docker_update_hosts_service_runtime_max: "3600s"- docker_daemon_storage_driver: "{{ ('zfs' in var_lib_file_system.stdout) | ternary('zfs', 'overlay2') }}"- docker_daemon_template_force: true ################################@@ -147,11 +126,40 @@ # Docker Controller ################################ -docker_controller_python_version: "3.10"+docker_controller_binary_path: "/usr/local/bin/sdc"++docker_controller_releases_url: "{{ svm }}https://api.github.com/repos/saltyorg/sdc/releases/latest"++docker_controller_releases_download_url: 
https://github.com/saltyorg/sdc/releases/download++docker_controller_release_lookup_command: |+ curl -s {{ docker_controller_releases_url }} \+ | jq -r ".assets[] | select(.name == \"sdc_linux_amd64\") \+ | .browser_download_url"++# Legacy docker_controller_venv_path: "/srv/docker-controller/venv" ################################ # Docker DNS ################################ -docker_dns_python_version: "3.10"+docker_dns_binary_path: "/usr/local/bin/sdhm"++docker_dns_releases_url: "{{ svm }}https://api.github.com/repos/saltyorg/sdhm/releases/latest"++docker_dns_releases_download_url: https://github.com/saltyorg/sdhm/releases/download++docker_dns_release_lookup_command: |+ curl -s {{ docker_dns_releases_url }} \+ | jq -r ".assets[] | select(.name == \"sdhm_linux_amd64\") \+ | .browser_download_url"++docker_dns_ports_8090: "{{ port_lookup_8090.meta.port+ if (port_lookup_8090.meta.port is defined) and (port_lookup_8090.meta.port | trim | length > 0)+ else '8090' }}"++docker_dns_networks:+ - "saltbox"++docker_dns_periodic_validation: "5m"
modified
roles/docker/tasks/main.yml
@@ -30,8 +30,8 @@ - name: Get Docker service state ansible.builtin.set_fact:- docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"- docker_controller_service_running: "{{ (services['saltbox_managed_docker_controller.service'] is defined) and (services['saltbox_managed_docker_controller.service']['state'] == 'running') }}"+ docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"+ docker_controller_service_running: "{{ (ansible_facts['services']['saltbox_managed_docker_controller.service'] is defined) and (ansible_facts['services']['saltbox_managed_docker_controller.service']['state'] == 'running') }}" - name: Tasks for when Docker exists and is running when: docker_service_check@@ -90,10 +90,10 @@ rescue: - name: Install missing modules package- ansible.builtin.shell: "apt-get install --reinstall linux-modules-{{ ansible_kernel }}"+ ansible.builtin.shell: "apt-get install --reinstall linux-modules-{{ ansible_facts['kernel'] }}" - name: "Import Daemon Tasks"- ansible.builtin.import_tasks: "subtasks/daemon.yml"+ ansible.builtin.include_tasks: "subtasks/daemon.yml" - name: "Import Docker Binary tasks" ansible.builtin.include_tasks: "subtasks/binary/binary.yml"@@ -103,7 +103,7 @@ when: ('btrfs' in var_lib_file_system.stdout) - name: "Import Nvidia tasks"- ansible.builtin.import_tasks: "subtasks/nvidia.yml"+ ansible.builtin.include_tasks: "subtasks/nvidia.yml" when: use_nvidia - name: Block Docker Controller@@ -132,7 +132,7 @@ timeout: "60" - name: "Import Controller tasks"- ansible.builtin.import_tasks: "subtasks/controller.yml"+ ansible.builtin.include_tasks: "subtasks/controller.yml" - name: Gather list of running Docker containers (before Network Tasks) ansible.builtin.shell: "docker ps --format '{{ '{{' }} .Names{{ '}}' }}' | sort | xargs echo -n"@@ -149,10 +149,10 @@ when: 
(docker_running_containers_before_network_tasks_ps.stdout | trim | length > 0) - name: "Import Docker Network tasks"- ansible.builtin.import_tasks: "subtasks/network.yml"+ ansible.builtin.include_tasks: "subtasks/network.yml" - name: "Import Docker Housekeeping tasks"- ansible.builtin.import_tasks: "subtasks/housekeeping.yml"+ ansible.builtin.include_tasks: "subtasks/housekeeping.yml" tags: docker-housekeeping - name: Check if system needs to reboot@@ -195,15 +195,15 @@ ignore_errors: true - name: "Import Docker Hub tasks"- ansible.builtin.import_tasks: "subtasks/dockerhub.yml"+ ansible.builtin.include_tasks: "subtasks/dockerhub.yml" tags: dockerhub when: dockerhub_is_enabled - name: "Import DNS tasks"- ansible.builtin.import_tasks: "subtasks/dns.yml"+ ansible.builtin.include_tasks: "subtasks/dns.yml" - name: "Import Compose tasks"- ansible.builtin.import_tasks: "subtasks/compose.yml"+ ansible.builtin.include_tasks: "subtasks/compose.yml" tags: docker-compose - name: "Install ctop"
modified
roles/docker/tasks/subtasks/binary/binary.yml
@@ -10,7 +10,7 @@ - name: Binary | Identify apt source files ansible.builtin.find: paths: /etc/apt/sources.list.d/- recurse: no+ recurse: false register: apt_source_files - name: Binary | Check if file contains 'download.docker.com'@@ -28,7 +28,7 @@ - name: Binary | Update APT package index ansible.builtin.apt:- update_cache: yes+ update_cache: true - name: Binary | Ensure '/etc/apt/keyrings' exists ansible.builtin.file:
modified
roles/docker/tasks/subtasks/binary/binary2.yml
@@ -7,32 +7,32 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Binary | Release '{{ vars[item ~ '_dpkg'] }}' from hold+- name: Binary | Release '{{ lookup('vars', item ~ '_dpkg') }}' from hold ansible.builtin.dpkg_selections:- name: "{{ vars[item ~ '_dpkg'] }}"+ name: "{{ lookup('vars', item ~ '_dpkg') }}" selection: install- when: (vars[item ~ '_dpkg'] in ansible_facts.packages)+ when: (lookup('vars', item ~ '_dpkg') in ansible_facts['packages']) -- name: Binary | Install '{{ lookup('vars', item + '_package') }}' # noqa args[module]+- name: Binary | Install '{{ lookup('vars', item + '_package') }}' ansible.builtin.apt: name: "{{ lookup('vars', item + '_package') }}"- state: "{{ docker_package_state if (item == 'docker_ce' or item == 'docker_ce_cli') else 'latest' }}"+ state: "{{ 'present' if (item == 'docker_ce' or item == 'docker_ce_cli') else 'latest' }}" update_cache: true allow_downgrade: true -- name: Binary | Put '{{ vars[item ~ '_dpkg'] }}' into hold+- name: Binary | Put '{{ lookup('vars', item ~ '_dpkg') }}' into hold ansible.builtin.dpkg_selections:- name: "{{ vars[item ~ '_dpkg'] }}"+ name: "{{ lookup('vars', item ~ '_dpkg') }}" selection: hold when: put_docker_dpkg_into_hold -- name: Binary | Get '{{ vars[item ~ '_name'] }}' version- ansible.builtin.shell: "{{ vars[item ~ '_filepath'] }} '{{ 'version' if item == 'compose_cli' else '--version' }}' | head -n 1 | awk '{ print {{ '$4' if item == 'compose_cli' else '$3' }} }' | sed 's/,$//' | sed 's/v//'"+- name: Binary | Get '{{ lookup('vars', item ~ '_name') }}' version+ ansible.builtin.shell: "{{ lookup('vars', item ~ '_filepath') }} '{{ 'version' if item == 'compose_cli' else '--version' }}' | head -n 1 | awk '{ print {{ '$4' if item == 'compose_cli' else '$3' }} }' | sed 's/,$//' | sed 's/v//'" register: binary_version ignore_errors: true changed_when: false -- name: Binary | Display '{{ vars[item ~ '_name'] }}' version+- name: 
Binary | Display '{{ lookup('vars', item ~ '_name') }}' version ansible.builtin.debug:- msg: "{{ vars[item ~ '_name'] }} version {{ binary_version.stdout }} installed."+ msg: "{{ lookup('vars', item ~ '_name') }} version {{ binary_version.stdout }} installed." when: binary_version is defined
modified
roles/docker/tasks/subtasks/btrfs/pseudo_file_system.yml
@@ -11,7 +11,7 @@ - name: "BTRFS | Psuedo-File-System | Determine if '/var/lib/docker' is mounted" ansible.builtin.set_fact:- var_lib_docker_mount_exists: "{{ true if (ansible_mounts | json_query('[?mount == `/var/lib/docker`]')) else false }}"+ var_lib_docker_mount_exists: "{{ true if (ansible_facts['mounts'] | json_query('[?mount == `/var/lib/docker`]')) else false }}" - name: BTRFS | Psuedo-File-System | Tasks when '/var/lib/docker' is not mounted when: (not var_lib_docker_mount_exists)
modified
roles/docker/tasks/subtasks/controller.yml
@@ -7,55 +7,39 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Controller | Execute Python role- ansible.builtin.include_role:- name: "python"- vars:- python_version: "{{ docker_controller_python_version }}"--- name: "Controller | Lookup Python {{ docker_controller_python_version }} installation"- ansible.builtin.command: "{{ python_bin }} python find {{ docker_controller_python_version }} --managed-python"- register: docker_controller_python_install_path_lookup- changed_when: false- environment: "{{ python_environment }}"- become: true- become_user: "{{ user.name }}"--- name: Controller | Set Python version- ansible.builtin.set_fact:- docker_controller_python_install_path: "{{ docker_controller_python_install_path_lookup.stdout }}"- - name: Controller | Delete venv folder ansible.builtin.file: path: "{{ docker_controller_venv_path }}" state: absent -- name: Controller | Create venv- ansible.builtin.command:- cmd: "{{ docker_controller_python_install_path }} -m venv {{ docker_controller_venv_path }}"+- name: Controller | Get URL for latest sdc release+ ansible.builtin.shell: "{{ docker_controller_release_lookup_command }}" args:- creates: "{{ docker_controller_venv_path }}"- become: true- become_user: "{{ user.name }}"+ executable: /bin/bash+ register: docker_controller_download_url -- name: Controller | Install pip requirements- ansible.builtin.pip:- requirements: "/srv/git/saltbox/requirements/requirements-docker-controller.txt"- virtualenv: "{{ docker_controller_venv_path }}"- virtualenv_command: "{{ docker_controller_venv_path }}/bin/python3 -m pip"- become: true- become_user: "{{ user.name }}"+- name: Controller | Download sdc+ ansible.builtin.get_url:+ url: "{{ docker_controller_download_url.stdout }}"+ dest: "{{ docker_controller_binary_path }}"+ owner: "root"+ group: "root"+ mode: "0755"+ register: x+ until: "x is not failed"+ retries: "{{ ansible_retry_count+ if (not 
continuous_integration)+ else ansible_retry_count_ci }}"+ delay: 10 -- name: Controller | Find pip3 path- ansible.builtin.find:- paths: "/srv/docker-controller/venv"- recurse: yes- patterns: 'uvicorn'- register: docker_controller_venv_files+- name: Controller | Get sdc binary version+ ansible.builtin.shell: "{{ docker_controller_binary_path }} --version | awk '{ print $3 }'"+ register: sdc_binary_version+ changed_when: false -- name: Controller | Path- ansible.builtin.set_fact:- docker_controller_uvicorn_path: "{{ docker_controller_venv_files.files[0].path }}"+- name: Controller | Display sdc binary version+ ansible.builtin.debug:+ msg: "sdc {{ sdc_binary_version.stdout }} installed." - name: Controller | Import 'saltbox_managed_docker_controller.service' ansible.builtin.template:@@ -75,7 +59,7 @@ ansible.builtin.uri: url: "{{ docker_controller_url }}/ping" method: GET- return_content: yes+ return_content: true status_code: 200 register: result retries: 120
modified
roles/docker/tasks/subtasks/dns.yml
@@ -12,36 +12,48 @@ name: docker-update-hosts state: stopped enabled: false- when: (services['docker-update-hosts.service'] is defined)+ when: (ansible_facts['services']['docker-update-hosts.service'] is defined) - name: "DNS | Remove docker-update-hosts.service" ansible.builtin.file: path: /etc/systemd/system/docker-update-hosts.service state: absent -- name: "DNS | Delete 'docker-update-hosts'"- ansible.builtin.file:- path: "/usr/local/bin/docker-update-hosts"- state: absent+- name: "DNS | Get URL for latest sdhm release"+ ansible.builtin.shell: "{{ docker_dns_release_lookup_command }}"+ args:+ executable: /bin/bash+ register: docker_dns_download_url -- name: "DNS | Execute Python role"- ansible.builtin.include_role:- name: "python"- vars:- python_version: "{{ docker_dns_python_version }}"- when: (docker_dns_python_version != docker_controller_python_version)+- name: "DNS | Download sdhm"+ ansible.builtin.get_url:+ url: "{{ docker_dns_download_url.stdout }}"+ dest: "{{ docker_dns_binary_path }}"+ owner: "root"+ group: "root"+ mode: "0755"+ register: x+ until: "x is not failed"+ retries: "{{ ansible_retry_count+ if (not continuous_integration)+ else ansible_retry_count_ci }}"+ delay: 10 -- name: "DNS | Lookup Python {{ docker_dns_python_version }} installation"- ansible.builtin.command: "{{ python_bin }} python find {{ docker_dns_python_version }} --managed-python"- register: docker_dns_python_install_path_lookup+- name: "DNS | Get sdhm binary version"+ ansible.builtin.shell: "{{ docker_dns_binary_path }} --version | awk '{ print $3 }'"+ register: sdhm_binary_version changed_when: false- environment: "{{ python_environment }}"- become: true- become_user: "{{ user.name }}" -- name: "DNS | Set Python version"- ansible.builtin.set_fact:- docker_dns_python_install_path: "{{ docker_dns_python_install_path_lookup.stdout }}"+- name: "DNS | Display sdhm binary version"+ ansible.builtin.debug:+ msg: "sdhm {{ sdhm_binary_version.stdout }} installed."++- name: "DNS | Get 
next available port within the range of '8090-8180'"+ find_open_port:+ low_bound: "8090"+ high_bound: "8180"+ protocol: both+ register: port_lookup_8090 - name: "DNS | Import 'saltbox_managed_docker_update_hosts.service'" ansible.builtin.template:
modified
roles/docker/templates/docker-controller-helper.service.j2
@@ -15,7 +15,7 @@ After=docker.service saltbox_managed_docker_controller.service [Service]-ExecStart=/srv/ansible/venv/bin/python3 /srv/git/saltbox/scripts/saltbox_docker_controller_helper.py+ExecStart={{ docker_controller_binary_path }} helper --controller-url "http://127.0.0.1:3377" [Install] WantedBy=multi-user.target
modified
roles/docker/templates/docker-controller.service.j2
@@ -14,8 +14,7 @@ [Service] Type=simple-WorkingDirectory=/srv/git/saltbox/scripts-ExecStart={{ docker_controller_uvicorn_path }} saltbox_docker_controller:app --host 127.0.0.1 --port 3377+ExecStart={{ docker_controller_binary_path }} server --host 127.0.0.1 --port 3377 Restart=on-failure RestartSec=5s
modified
roles/docker/templates/docker-update-hosts.service.j2
@@ -9,12 +9,15 @@ ######################################################################### [Unit]-Description=Saltbox Docker Hosts DNS Resolution Helper-BindsTo=docker.service+Description=Saltbox Docker Hosts Manager After=docker.service+Requires=docker.service [Service]-ExecStart={{ docker_dns_python_install_path }} /srv/git/saltbox/roles/docker/files/docker-update-hosts.py {{ docker_update_hosts_service_runtime_max }}+Type=simple+ExecStart={{ docker_dns_binary_path }} --networks {{ docker_dns_networks | join(',') }} --interval {{ docker_dns_periodic_validation }} --health-port {{ docker_dns_ports_8090 }}+Restart=always+RestartSec=10 [Install] WantedBy=multi-user.target
modified
roles/docker_socket_proxy/defaults/main.yml
@@ -18,96 +18,53 @@ ################################ # Container-docker_socket_proxy_docker_container: "{{ docker_socket_proxy_name }}"+docker_socket_proxy_role_docker_container: "{{ docker_socket_proxy_name }}" # Image-docker_socket_proxy_docker_image_pull: true-docker_socket_proxy_docker_image_tag: "latest"-docker_socket_proxy_docker_image: "lscr.io/linuxserver/socket-proxy:{{ docker_socket_proxy_docker_image_tag }}"--# Ports-docker_socket_proxy_docker_ports_defaults: []-docker_socket_proxy_docker_ports_custom: []-docker_socket_proxy_docker_ports: "{{ docker_socket_proxy_docker_ports_defaults- + docker_socket_proxy_docker_ports_custom }}"+docker_socket_proxy_role_docker_image_pull: true+docker_socket_proxy_role_docker_image_repo: "lscr.io/linuxserver/socket-proxy"+docker_socket_proxy_role_docker_image_tag: "latest"+docker_socket_proxy_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='docker_socket_proxy') }}:{{ lookup('role_var', '_docker_image_tag', role='docker_socket_proxy') }}" # Envs-docker_socket_proxy_docker_envs_default:+docker_socket_proxy_role_docker_envs_default: TZ: "{{ tz }}"- DISABLE_IPV6: "{{ '0' if dns.ipv6 else '1' }}"-docker_socket_proxy_docker_envs_custom: {}-docker_socket_proxy_docker_envs: "{{ docker_socket_proxy_docker_envs_default- | combine(docker_socket_proxy_docker_envs_custom) }}"--# Commands-docker_socket_proxy_docker_commands_default: []-docker_socket_proxy_docker_commands_custom: []-docker_socket_proxy_docker_commands: "{{ docker_socket_proxy_docker_commands_default- + docker_socket_proxy_docker_commands_custom }}"+ DISABLE_IPV6: "{{ '0' if dns_ipv6_enabled else '1' }}"+docker_socket_proxy_role_docker_envs_custom: {}+docker_socket_proxy_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='docker_socket_proxy')+ | combine(lookup('role_var', '_docker_envs_custom', role='docker_socket_proxy')) }}" # Volumes-docker_socket_proxy_docker_volumes_default:- - 
"/var/run/docker.sock:/var/run/docker.sock:ro"-docker_socket_proxy_docker_volumes_custom: []-docker_socket_proxy_docker_volumes: "{{ docker_socket_proxy_docker_volumes_default- + docker_socket_proxy_docker_volumes_custom }}"+docker_socket_proxy_role_docker_volumes_default:+ - "/var/run/docker.sock:/var/run/docker.sock"+docker_socket_proxy_role_docker_volumes_custom: []+docker_socket_proxy_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='docker_socket_proxy')+ + lookup('role_var', '_docker_volumes_custom', role='docker_socket_proxy') }}" # Mounts-docker_socket_proxy_docker_mounts_default:+docker_socket_proxy_role_docker_mounts_default: - target: /run type: tmpfs-docker_socket_proxy_docker_mounts_custom: []-docker_socket_proxy_docker_mounts: "{{ docker_socket_proxy_docker_mounts_default- + docker_socket_proxy_docker_mounts_custom }}"--# Devices-docker_socket_proxy_docker_devices_default: []-docker_socket_proxy_docker_devices_custom: []-docker_socket_proxy_docker_devices: "{{ docker_socket_proxy_docker_devices_default- + docker_socket_proxy_docker_devices_custom }}"--# Hosts-docker_socket_proxy_docker_hosts_default: {}-docker_socket_proxy_docker_hosts_custom: {}-docker_socket_proxy_docker_hosts: "{{ docker_hosts_common- | combine(docker_socket_proxy_docker_hosts_default)- | combine(docker_socket_proxy_docker_hosts_custom) }}"--# Labels-docker_socket_proxy_docker_labels_default: {}-docker_socket_proxy_docker_labels_custom: {}-docker_socket_proxy_docker_labels: "{{ docker_labels_common- | combine(docker_socket_proxy_docker_labels_default)- | combine(docker_socket_proxy_docker_labels_custom) }}"+docker_socket_proxy_role_docker_mounts_custom: []+docker_socket_proxy_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='docker_socket_proxy')+ + lookup('role_var', '_docker_mounts_custom', role='docker_socket_proxy') }}" # Hostname-docker_socket_proxy_docker_hostname: "{{ docker_socket_proxy_name 
}}"+docker_socket_proxy_role_docker_hostname: "{{ docker_socket_proxy_name }}" # Networks-docker_socket_proxy_docker_networks_alias: "{{ docker_socket_proxy_name }}"-docker_socket_proxy_docker_networks_default: []-docker_socket_proxy_docker_networks_custom: []-docker_socket_proxy_docker_networks: "{{ docker_networks_common- + docker_socket_proxy_docker_networks_default- + docker_socket_proxy_docker_networks_custom }}"--# Capabilities-docker_socket_proxy_docker_capabilities_default: []-docker_socket_proxy_docker_capabilities_custom: []-docker_socket_proxy_docker_capabilities: "{{ docker_socket_proxy_docker_capabilities_default- + docker_socket_proxy_docker_capabilities_custom }}"--# Security Opts-docker_socket_proxy_docker_security_opts_default: []-docker_socket_proxy_docker_security_opts_custom: []-docker_socket_proxy_docker_security_opts: "{{ docker_socket_proxy_docker_security_opts_default- + docker_socket_proxy_docker_security_opts_custom }}"+docker_socket_proxy_role_docker_networks_alias: "{{ docker_socket_proxy_name }}"+docker_socket_proxy_role_docker_networks_default: []+docker_socket_proxy_role_docker_networks_custom: []+docker_socket_proxy_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='docker_socket_proxy')+ + lookup('role_var', '_docker_networks_custom', role='docker_socket_proxy') }}" # Restart Policy-docker_socket_proxy_docker_restart_policy: unless-stopped+docker_socket_proxy_role_docker_restart_policy: unless-stopped # State-docker_socket_proxy_docker_state: started+docker_socket_proxy_role_docker_state: started # Read Only Filesystem-docker_socket_proxy_docker_read_only: true+docker_socket_proxy_role_docker_read_only: true
modified
roles/docker_socket_proxy/tasks/main.yml
@@ -1,8 +1,7 @@ #########################################################################-# Title: Sandbox: docker_socket_proxy #+# Title: Saltbox: docker_socket_proxy # # Author(s): salty, JigSawFr #-# URL: https://github.com/saltyorg/Sandbox #-# URL: https://github.com/Tecnativa/docker-socket-proxy #+# URL: https://github.com/saltyorg/Saltbox # # -- # ######################################################################### # GNU General Public License v3.0 #
modified
roles/dozzle/defaults/main.yml
@@ -7,6 +7,12 @@ # GNU General Public License v3.0 # ########################################################################## ---+################################+# Basics+################################++dozzle_name: dozzle+ ################################ # Docker Socket Proxy ################################@@ -19,169 +25,129 @@ # Settings ################################ -dozzle_additional_hosts: ""-dozzle_agent_hosts: ""-dozzle_agent_mode: false--################################-# Basics-################################--dozzle_name: dozzle+dozzle_role_additional_hosts: ""+dozzle_role_agent_hosts: ""+dozzle_role_agent_mode: false ################################ # Paths ################################ -dozzle_paths_folder: "{{ dozzle_name }}"-dozzle_paths_location: "{{ server_appdata_path }}/{{ dozzle_paths_folder }}"-dozzle_paths_folders_list:- - "{{ dozzle_paths_location }}"+dozzle_role_paths_folder: "{{ dozzle_name }}"+dozzle_role_paths_location: "{{ server_appdata_path }}/{{ dozzle_role_paths_folder }}"+dozzle_role_paths_folders_list:+ - "{{ dozzle_role_paths_location }}" ################################ # Web ################################ -dozzle_web_subdomain: "{{ dozzle_name }}"-dozzle_web_domain: "{{ user.domain }}"-dozzle_web_port: "8080"-dozzle_web_url: "{{ 'https://' + (dozzle_web_subdomain + '.' + dozzle_web_domain- if (dozzle_web_subdomain | length > 0)- else dozzle_web_domain) }}"+dozzle_role_web_subdomain: "{{ dozzle_name }}"+dozzle_role_web_domain: "{{ user.domain }}"+dozzle_role_web_port: "8080"+dozzle_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='dozzle') + '.' 
+ lookup('role_var', '_web_domain', role='dozzle')+ if (lookup('role_var', '_web_subdomain', role='dozzle') | length > 0)+ else lookup('role_var', '_web_domain', role='dozzle')) }}" ################################ # DNS ################################ -dozzle_dns_record: "{{ dozzle_web_subdomain }}"-dozzle_dns_zone: "{{ dozzle_web_domain }}"-dozzle_dns_proxy: "{{ dns.proxied }}"+dozzle_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='dozzle') }}"+dozzle_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='dozzle') }}"+dozzle_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -dozzle_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-dozzle_traefik_middleware_default: "{{ traefik_default_middleware- + (',dropsecurityheaders@file,themepark-' + lookup('vars', dozzle_name + '_name', default=dozzle_name)- if (dozzle_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-dozzle_traefik_middleware_custom: ""-dozzle_traefik_certresolver: "{{ traefik_default_certresolver }}"-dozzle_traefik_enabled: true-dozzle_traefik_api_enabled: false-dozzle_traefik_api_endpoint: ""+dozzle_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+dozzle_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',dropsecurityheaders@file,themepark-' + dozzle_name+ if (lookup('role_var', '_themepark_enabled', role='dozzle') and global_themepark_plugin_enabled)+ else '') }}"+dozzle_role_traefik_middleware_custom: ""+dozzle_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+dozzle_role_traefik_enabled: true+dozzle_role_traefik_api_enabled: false+dozzle_role_traefik_api_endpoint: "" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-dozzle_themepark_enabled: false-dozzle_themepark_app: "dozzle"-dozzle_themepark_theme: "{{ global_themepark_theme 
}}"-dozzle_themepark_domain: "{{ global_themepark_domain }}"-dozzle_themepark_addons: []+dozzle_role_themepark_enabled: false+dozzle_role_themepark_app: "dozzle"+dozzle_role_themepark_theme: "{{ global_themepark_theme }}"+dozzle_role_themepark_domain: "{{ global_themepark_domain }}"+dozzle_role_themepark_addons: [] ################################ # Docker ################################ # Container-dozzle_docker_container: "{{ dozzle_name }}"+dozzle_role_docker_container: "{{ dozzle_name }}" # Image-dozzle_docker_image_pull: true-dozzle_docker_image_tag: "latest"-dozzle_docker_image: "amir20/dozzle:{{ dozzle_docker_image_tag }}"--# Ports-dozzle_docker_ports_defaults: []-dozzle_docker_ports_custom: []-dozzle_docker_ports: "{{ dozzle_docker_ports_defaults- + dozzle_docker_ports_custom }}"+dozzle_role_docker_image_pull: true+dozzle_role_docker_image_repo: "amir20/dozzle"+dozzle_role_docker_image_tag: "latest"+dozzle_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='dozzle') }}:{{ lookup('role_var', '_docker_image_tag', role='dozzle') }}" # Envs-dozzle_docker_envs_default:- DOZZLE_AUTH_PROVIDER: "{{ 'forward-proxy' if (dozzle_traefik_sso_middleware | length > 0) else omit }}"- DOZZLE_REMOTE_AGENT: "{{ dozzle_agent_hosts if (dozzle_additional_hosts | length > 0) else omit }}"- DOZZLE_REMOTE_HOST: "{{ 'tcp://' + dozzle_name + '-docker-socket-proxy:2375|' + traefik_host + ',' + dozzle_additional_hosts- if (dozzle_additional_hosts | length > 0)+dozzle_role_docker_envs_default:+ DOZZLE_AUTH_PROVIDER: "{{ 'forward-proxy' if (lookup('role_var', '_traefik_sso_middleware', role='dozzle') | length > 0) else omit }}"+ DOZZLE_REMOTE_AGENT: "{{ lookup('role_var', '_agent_hosts', role='dozzle') if (lookup('role_var', '_additional_hosts', role='dozzle') | length > 0) else omit }}"+ DOZZLE_REMOTE_HOST: "{{ 'tcp://' + dozzle_name + '-docker-socket-proxy:2375|' + traefik_host + ',' + lookup('role_var', '_additional_hosts', role='dozzle')+ if (lookup('role_var', 
'_additional_hosts', role='dozzle') | length > 0) else 'tcp://' + dozzle_name + '-docker-socket-proxy:2375|' + traefik_host }}"-dozzle_docker_envs_custom: {}-dozzle_docker_envs: "{{ dozzle_docker_envs_default- | combine(dozzle_docker_envs_custom) }}"+ DOZZLE_AUTH_HEADER_USER: "{{ 'X-authentik-username' if 'authentik' in lookup('role_var', '_traefik_sso_middleware', role='dozzle') else omit }}"+ DOZZLE_AUTH_HEADER_EMAIL: "{{ 'X-authentik-email' if 'authentik' in lookup('role_var', '_traefik_sso_middleware', role='dozzle') else omit }}"+ DOZZLE_AUTH_HEADER_NAME: "{{ 'X-authentik-name' if 'authentik' in lookup('role_var', '_traefik_sso_middleware', role='dozzle') else omit }}"+dozzle_role_docker_envs_custom: {}+dozzle_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='dozzle')+ | combine(lookup('role_var', '_docker_envs_custom', role='dozzle')) }}" # Commands-dozzle_docker_commands_agent: "agent"-dozzle_docker_commands_default: []-dozzle_docker_commands_custom: []-dozzle_docker_commands: "{{ dozzle_docker_commands_agent- + dozzle_docker_commands_default- + dozzle_docker_commands_custom- if (dozzle_agent_mode)- else dozzle_docker_commands_default- + dozzle_docker_commands_custom }}"--# Volumes-dozzle_docker_volumes_default: []-dozzle_docker_volumes_custom: []-dozzle_docker_volumes: "{{ dozzle_docker_volumes_default- + dozzle_docker_volumes_custom }}"--# Devices-dozzle_docker_devices_default: []-dozzle_docker_devices_custom: []-dozzle_docker_devices: "{{ dozzle_docker_devices_default- + dozzle_docker_devices_custom }}"--# Hosts-dozzle_docker_hosts_default: {}-dozzle_docker_hosts_custom: {}-dozzle_docker_hosts: "{{ docker_hosts_common- | combine(dozzle_docker_hosts_default)- | combine(dozzle_docker_hosts_custom) }}"+dozzle_role_docker_commands_agent: "agent"+dozzle_role_docker_commands_default: []+dozzle_role_docker_commands_custom: []+dozzle_role_docker_commands: "{{ lookup('role_var', '_docker_commands_agent', role='dozzle')+ + lookup('role_var', 
'_docker_commands_default', role='dozzle')+ + lookup('role_var', '_docker_commands_custom', role='dozzle')+ if lookup('role_var', '_agent_mode', role='dozzle')+ else lookup('role_var', '_docker_commands_default', role='dozzle')+ + lookup('role_var', '_docker_commands_custom', role='dozzle') }}" # Labels-dozzle_docker_labels_default: {}-dozzle_docker_labels_custom: {}-dozzle_docker_labels: "{{ docker_labels_common- | combine(dozzle_docker_labels_default)- | combine((traefik_themepark_labels- if (dozzle_themepark_enabled and global_themepark_plugin_enabled)- else {}),- dozzle_docker_labels_custom) }}"+dozzle_role_docker_labels_default: {}+dozzle_role_docker_labels_custom: {}+dozzle_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='dozzle')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='dozzle') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='dozzle')) }}" # Hostname-dozzle_docker_hostname: "{{ dozzle_name }}"+dozzle_role_docker_hostname: "{{ dozzle_name }}" # Networks-dozzle_docker_networks_alias: "{{ dozzle_name }}"-dozzle_docker_networks_default: []-dozzle_docker_networks_custom: []-dozzle_docker_networks: "{{ docker_networks_common- + dozzle_docker_networks_default- + dozzle_docker_networks_custom }}"--# Capabilities-dozzle_docker_capabilities_default: []-dozzle_docker_capabilities_custom: []-dozzle_docker_capabilities: "{{ dozzle_docker_capabilities_default- + dozzle_docker_capabilities_custom }}"--# Security Opts-dozzle_docker_security_opts_default: []-dozzle_docker_security_opts_custom: []-dozzle_docker_security_opts: "{{ dozzle_docker_security_opts_default- + dozzle_docker_security_opts_custom }}"+dozzle_role_docker_networks_alias: "{{ dozzle_name }}"+dozzle_role_docker_networks_default: []+dozzle_role_docker_networks_custom: []+dozzle_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', 
role='dozzle')+ + lookup('role_var', '_docker_networks_custom', role='dozzle') }}" # Restart Policy-dozzle_docker_restart_policy: unless-stopped+dozzle_role_docker_restart_policy: unless-stopped # State-dozzle_docker_state: started+dozzle_role_docker_state: started # Dependencies-dozzle_depends_on: "{{ dozzle_name }}-docker-socket-proxy"-dozzle_depends_on_delay: "0"-dozzle_depends_on_healthchecks: "false"+dozzle_role_depends_on: "{{ dozzle_name }}-docker-socket-proxy"+dozzle_role_depends_on_delay: "0"+dozzle_role_depends_on_healthchecks: "false"
modified
roles/dozzle/tasks/main.yml
@@ -12,14 +12,14 @@ name: docker_socket_proxy vars: docker_socket_proxy_name: "{{ dozzle_name }}-docker-socket-proxy"- docker_socket_proxy_docker_envs_custom: "{{ dozzle_docker_socket_proxy_envs }}"+ docker_socket_proxy_role_docker_envs_custom: "{{ dozzle_docker_socket_proxy_envs }}" - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/emby/defaults/main.yml
@@ -17,197 +17,154 @@ # Settings ################################ -emby_config_cache_size: 1024+emby_role_config_cache_size: 1024 ################################ # Paths ################################ -emby_paths_folder: "{{ emby_name }}"-emby_paths_location: "{{ server_appdata_path }}/{{ emby_paths_folder }}"-emby_paths_transcodes_location: "{{ transcodes_path }}/{{ emby_paths_folder }}"-emby_paths_folders_list:- - "{{ emby_paths_location }}"- - "{{ emby_paths_location }}/config"- - "{{ emby_paths_location }}/config/users"- - "{{ emby_paths_transcodes_location }}"-emby_paths_config_location: "{{ emby_paths_location }}/config/system.xml"-emby_paths_dlna_xml_location: "{{ emby_paths_location }}/config/dlna.xml"+emby_role_paths_folder: "{{ emby_name }}"+emby_role_paths_location: "{{ server_appdata_path }}/{{ emby_role_paths_folder }}"+emby_role_paths_transcodes_location: "{{ transcodes_path }}/{{ emby_role_paths_folder }}"+emby_role_paths_folders_list:+ - "{{ emby_role_paths_location }}"+ - "{{ emby_role_paths_location }}/config"+ - "{{ emby_role_paths_location }}/config/users"+ - "{{ emby_role_paths_transcodes_location }}"+emby_role_paths_config_location: "{{ emby_role_paths_location }}/config/system.xml"+emby_role_paths_dlna_xml_location: "{{ emby_role_paths_location }}/config/dlna.xml" ################################ # Web ################################ -emby_web_subdomain: "{{ emby_name }}"-emby_web_domain: "{{ user.domain }}"-emby_web_port: "8096"-emby_web_url: "{{ 'https://' + (lookup('vars', emby_name + '_web_subdomain', default=emby_web_subdomain) + '.' 
+ lookup('vars', emby_name + '_web_domain', default=emby_web_domain)- if (lookup('vars', emby_name + '_web_subdomain', default=emby_web_subdomain) | length > 0)- else lookup('vars', emby_name + '_web_domain', default=emby_web_domain)) }}"+emby_role_web_subdomain: "{{ emby_name }}"+emby_role_web_domain: "{{ user.domain }}"+emby_role_web_port: "8096"+emby_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='emby') + '.' + lookup('role_var', '_web_domain', role='emby')+ if (lookup('role_var', '_web_subdomain', role='emby') | length > 0)+ else lookup('role_var', '_web_domain', role='emby')) }}" ################################ # DNS ################################ -emby_dns_record: "{{ lookup('vars', emby_name + '_web_subdomain', default=emby_web_subdomain) }}"-emby_dns_zone: "{{ lookup('vars', emby_name + '_web_domain', default=emby_web_domain) }}"-emby_dns_proxy: "{{ dns.proxied }}"+emby_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='emby') }}"+emby_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='emby') }}"+emby_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -emby_traefik_sso_middleware: ""-emby_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', emby_name + '_name', default=emby_name)- if (emby_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-emby_traefik_middleware_custom: ""-emby_traefik_certresolver: "{{ traefik_default_certresolver }}"-emby_traefik_enabled: true-emby_traefik_gzip_enabled: false-emby_traefik_api_enabled: false-emby_traefik_api_endpoint: ""+emby_role_traefik_sso_middleware: ""+emby_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + emby_name+ if (lookup('role_var', '_themepark_enabled', role='emby') and global_themepark_plugin_enabled)+ else '') }}"+emby_role_traefik_middleware_custom: ""+emby_role_traefik_certresolver: "{{ 
traefik_default_certresolver }}"+emby_role_traefik_enabled: true+emby_role_traefik_gzip_enabled: false+emby_role_traefik_api_enabled: false+emby_role_traefik_api_endpoint: "" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-emby_themepark_enabled: false-emby_themepark_app: "emby"-emby_themepark_theme: "{{ global_themepark_theme }}"-emby_themepark_domain: "{{ global_themepark_domain }}"-emby_themepark_addons: []+emby_role_themepark_enabled: false+emby_role_themepark_app: "emby"+emby_role_themepark_theme: "{{ global_themepark_theme }}"+emby_role_themepark_domain: "{{ global_themepark_domain }}"+emby_role_themepark_addons: [] ################################ # Config ################################ -emby_config_settings_default:+emby_role_config_settings_default: - { xpath: 'IsBehindProxy', value: 'true' }- - { xpath: 'WanDdns', value: '{{ lookup("vars", emby_name + "_web_subdomain", default=emby_web_subdomain) }}.{{ lookup("vars", emby_name + "_web_domain", default=emby_web_domain) }}' }+ - { xpath: 'WanDdns', value: '{{ lookup("role_var", "_web_subdomain", role="emby") }}.{{ lookup("role_var", "_web_domain", role="emby") }}' } - { xpath: 'PublicPort', value: '80' } - { xpath: 'PublicHttpsPort', value: '443' } - { xpath: 'EnableHttps', value: 'true' } - { xpath: 'RequireHttps', value: 'false' } - { xpath: 'EnableUPnP', value: 'false' }- - { xpath: 'DatabaseCacheSizeMB', value: '{{ lookup("vars", emby_name + "_config_cache_size", default=emby_config_cache_size) }}' }+ - { xpath: 'DatabaseCacheSizeMB', value: '{{ lookup("role_var", "_config_cache_size", role="emby") | string }}' } -emby_config_settings_custom: []+emby_role_config_settings_custom: [] -emby_config_settings_list: "{{ lookup('vars', emby_name + '_config_settings_default', default=emby_config_settings_default) + lookup('vars', emby_name + '_config_settings_custom', default=emby_config_settings_custom) 
}}"+emby_role_config_settings_list: "{{ lookup('role_var', '_config_settings_default', role='emby') + lookup('role_var', '_config_settings_custom', role='emby') }}" ################################ # Docker ################################ # Container-emby_docker_container: "{{ emby_name }}"+emby_role_docker_container: "{{ emby_name }}" # Image-emby_docker_image_pull: true-emby_docker_image_repo: "lscr.io/linuxserver/emby"-emby_docker_image_tag: "latest"-emby_docker_image: "{{ lookup('vars', emby_name + '_docker_image_repo', default=emby_docker_image_repo)- + ':' + lookup('vars', emby_name + '_docker_image_tag', default=emby_docker_image_tag) }}"--# Ports-emby_docker_ports_defaults: []-emby_docker_ports_custom: []-emby_docker_ports: "{{ lookup('vars', emby_name + '_docker_ports_defaults', default=emby_docker_ports_defaults)- + lookup('vars', emby_name + '_docker_ports_custom', default=emby_docker_ports_custom) }}"+emby_role_docker_image_pull: true+emby_role_docker_image_repo: "lscr.io/linuxserver/emby"+emby_role_docker_image_tag: "latest"+emby_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='emby') }}:{{ lookup('role_var', '_docker_image_tag', role='emby') }}" # Envs-emby_docker_envs_default:+emby_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"-emby_docker_envs_custom: {}-emby_docker_envs: "{{ lookup('vars', emby_name + '_docker_envs_default', default=emby_docker_envs_default)- | combine(lookup('vars', emby_name + '_docker_envs_custom', default=emby_docker_envs_custom)) }}"--# Commands-emby_docker_commands_default: []-emby_docker_commands_custom: []-emby_docker_commands: "{{ lookup('vars', emby_name + '_docker_commands_default', default=emby_docker_commands_default)- + lookup('vars', emby_name + '_docker_commands_custom', default=emby_docker_commands_custom) }}"+emby_role_docker_envs_custom: {}+emby_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='emby')+ | combine(lookup('role_var', 
'_docker_envs_custom', role='emby')) }}" # Volumes-emby_docker_volumes_default:- - "{{ emby_paths_location }}:/config"+emby_role_docker_volumes_default:+ - "{{ emby_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts" - "/dev/shm:/dev/shm"- - "{{ emby_paths_transcodes_location }}:/transcode"-emby_docker_volumes_legacy:+ - "{{ emby_role_paths_transcodes_location }}:/transcode"+emby_role_docker_volumes_legacy: - "/mnt/unionfs/Media:/data"-emby_docker_volumes_custom: []-emby_docker_volumes: "{{ lookup('vars', emby_name + '_docker_volumes_default', default=emby_docker_volumes_default)- + lookup('vars', emby_name + '_docker_volumes_custom', default=emby_docker_volumes_custom)- + (lookup('vars', emby_name + '_docker_volumes_legacy', default=emby_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"+emby_role_docker_volumes_custom: []+emby_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='emby')+ + lookup('role_var', '_docker_volumes_custom', role='emby')+ + (lookup('role_var', '_docker_volumes_legacy', role='emby')+ if docker_legacy_volume+ else []) }}" # Mounts-emby_docker_mounts_default:+emby_role_docker_mounts_default: - target: /tmp type: tmpfs-emby_docker_mounts_custom: []-emby_docker_mounts: "{{ lookup('vars', emby_name + '_docker_mounts_default', default=emby_docker_mounts_default)- + lookup('vars', emby_name + '_docker_mounts_custom', default=emby_docker_mounts_custom) }}"--# Devices-emby_docker_devices_default: []-emby_docker_devices_custom: []-emby_docker_devices: "{{ lookup('vars', emby_name + '_docker_devices_default', default=emby_docker_devices_default)- + lookup('vars', emby_name + '_docker_devices_custom', default=emby_docker_devices_custom) }}"--# Hosts-emby_docker_hosts_default: {}-emby_docker_hosts_custom: {}-emby_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', emby_name + '_docker_hosts_default', default=emby_docker_hosts_default))- | combine(lookup('vars', emby_name + 
'_docker_hosts_custom', default=emby_docker_hosts_custom)) }}"+emby_role_docker_mounts_custom: []+emby_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='emby')+ + lookup('role_var', '_docker_mounts_custom', role='emby') }}" # Labels-emby_docker_labels_default: {}-emby_docker_labels_custom: {}-emby_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', emby_name + '_docker_labels_default', default=emby_docker_labels_default))- | combine((traefik_themepark_labels- if (emby_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', emby_name + '_docker_labels_custom', default=emby_docker_labels_custom)) }}"+emby_role_docker_labels_default: {}+emby_role_docker_labels_custom: {}+emby_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='emby')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='emby') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='emby')) }}" # Hostname-emby_docker_hostname: "{{ emby_name }}"--# Network Mode-emby_docker_network_mode_default: "{{ docker_networks_name_common }}"-emby_docker_network_mode: "{{ lookup('vars', emby_name + '_docker_network_mode_default', default=emby_docker_network_mode_default) }}"+emby_role_docker_hostname: "{{ emby_name }}" # Networks-emby_docker_networks_alias: "{{ emby_name }}"-emby_docker_networks_default: []-emby_docker_networks_custom: []-emby_docker_networks: "{{ docker_networks_common- + lookup('vars', emby_name + '_docker_networks_default', default=emby_docker_networks_default)- + lookup('vars', emby_name + '_docker_networks_custom', default=emby_docker_networks_custom) }}"--# Capabilities-emby_docker_capabilities_default: []-emby_docker_capabilities_custom: []-emby_docker_capabilities: "{{ lookup('vars', emby_name + '_docker_capabilities_default', default=emby_docker_capabilities_default)- + lookup('vars', emby_name + '_docker_capabilities_custom', 
default=emby_docker_capabilities_custom) }}"--# Security Opts-emby_docker_security_opts_default: []-emby_docker_security_opts_custom: []-emby_docker_security_opts: "{{ lookup('vars', emby_name + '_docker_security_opts_default', default=emby_docker_security_opts_default)- + lookup('vars', emby_name + '_docker_security_opts_custom', default=emby_docker_security_opts_custom) }}"+emby_role_docker_networks_alias: "{{ emby_name }}"+emby_role_docker_networks_default: []+emby_role_docker_networks_custom: []+emby_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='emby')+ + lookup('role_var', '_docker_networks_custom', role='emby') }}" # Restart Policy-emby_docker_restart_policy: unless-stopped+emby_role_docker_restart_policy: unless-stopped # State-emby_docker_state: started+emby_role_docker_state: started
modified
roles/emby/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -22,10 +22,10 @@ - name: Docker Devices Task ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/set_docker_devices_variable.yml"- when: gpu.intel or use_nvidia+ when: use_intel or use_nvidia - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install.yml"+ ansible.builtin.include_tasks: "subtasks/post-install.yml"
modified
roles/emby/tasks/subtasks/post-install.yml
@@ -13,7 +13,7 @@ - name: Post-Install | Ensure transcodes folder has the correct permissions ansible.builtin.file:- path: "{{ emby_paths_transcodes_location }}"+ path: "{{ lookup('role_var', '_paths_transcodes_location', role='emby') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -22,11 +22,11 @@ - name: Post-Install | Import 'dlna.xml' ansible.builtin.copy: src: "dlna.xml"- dest: "{{ emby_paths_dlna_xml_location }}"+ dest: "{{ lookup('role_var', '_paths_dlna_xml_location', role='emby') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" force: false - name: Post-Install | Settings Tasks- ansible.builtin.import_tasks: "settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml"
modified
roles/emby/tasks/subtasks/settings.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Wait for config file to be created ansible.builtin.wait_for:- path: "{{ emby_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='emby') }}" state: present - name: Settings | Stop Docker container@@ -17,12 +17,12 @@ - name: Settings | Update config file community.general.xml:- path: "{{ emby_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='emby') }}" xpath: "/ServerConfiguration/{{ item.xpath }}" value: "{{ item.value }}" become: true become_user: "{{ user.name }}"- loop: "{{ emby_config_settings_list }}"+ loop: "{{ lookup('role_var', '_config_settings_list', role='emby') }}" - name: Settings | Start Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/error_pages/defaults/main.yml
@@ -18,61 +18,37 @@ ################################ # Template options listed here https://github.com/tarampampam/error-pages-error_pages_template: "l7"+error_pages_role_template: "l7" ################################ # Docker ################################ # Container-error_pages_docker_container: "{{ error_pages_name }}"+error_pages_role_docker_container: "{{ error_pages_name }}" # Image-error_pages_docker_image_pull: true-error_pages_docker_image_tag: "latest"-error_pages_docker_image: "tarampampam/error-pages:{{ error_pages_docker_image_tag }}"--# Ports-error_pages_docker_ports_defaults: []-error_pages_docker_ports_custom: []-error_pages_docker_ports: "{{ error_pages_docker_ports_defaults- + error_pages_docker_ports_custom }}"+error_pages_role_docker_image_pull: true+error_pages_role_docker_image_repo: "tarampampam/error-pages"+error_pages_role_docker_image_tag: "latest"+error_pages_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='error_pages') }}:{{ lookup('role_var', '_docker_image_tag', role='error_pages') }}" # Envs-error_pages_docker_envs_default:- TEMPLATE_NAME: "{{ error_pages_template }}"-error_pages_docker_envs_custom: {}-error_pages_docker_envs: "{{ error_pages_docker_envs_default- | combine(error_pages_docker_envs_custom) }}"--# Commands-error_pages_docker_commands_default: []-error_pages_docker_commands_custom: []-error_pages_docker_commands: "{{ error_pages_docker_commands_default- + error_pages_docker_commands_custom }}"+error_pages_role_docker_envs_default:+ TEMPLATE_NAME: "{{ lookup('role_var', '_template', role='error_pages') }}"+error_pages_role_docker_envs_custom: {}+error_pages_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='error_pages')+ | combine(lookup('role_var', '_docker_envs_custom', role='error_pages')) }}" # Volumes-error_pages_docker_volumes_default:- - "/opt/error-pages:/opt/html"-error_pages_docker_volumes_custom: []-error_pages_docker_volumes: "{{ error_pages_docker_volumes_default- 
+ error_pages_docker_volumes_custom }}"--# Devices-error_pages_docker_devices_default: []-error_pages_docker_devices_custom: []-error_pages_docker_devices: "{{ error_pages_docker_devices_default- + error_pages_docker_devices_custom }}"--# Hosts-error_pages_docker_hosts_default: {}-error_pages_docker_hosts_custom: {}-error_pages_docker_hosts: "{{ docker_hosts_common- | combine(error_pages_docker_hosts_default)- | combine(error_pages_docker_hosts_custom) }}"+error_pages_role_docker_volumes_default:+ - "{{ server_appdata_path }}/error-pages:/opt/html"+error_pages_role_docker_volumes_custom: []+error_pages_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='error_pages')+ + lookup('role_var', '_docker_volumes_custom', role='error_pages') }}" # Labels-error_pages_docker_labels_default:+error_pages_role_docker_labels_default: traefik.enable: "true" traefik.http.routers.error-pages-router.rule: "PathPrefix(`/`)" traefik.http.routers.error-pages-router.priority: "5"@@ -82,36 +58,23 @@ traefik.http.middlewares.error-pages-middleware.errors.service: "error-pages-service" traefik.http.middlewares.error-pages-middleware.errors.query: "/{status}.html" traefik.http.services.error-pages-service.loadbalancer.server.port: "8080"-error_pages_docker_labels_custom: {}-error_pages_docker_labels: "{{ docker_labels_common- | combine(error_pages_docker_labels_default)- | combine(error_pages_docker_labels_custom) }}"+error_pages_role_docker_labels_custom: {}+error_pages_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='error_pages')+ | combine(lookup('role_var', '_docker_labels_custom', role='error_pages')) }}" # Hostname-error_pages_docker_hostname: "{{ error_pages_name }}"+error_pages_role_docker_hostname: "{{ error_pages_name }}" # Networks-error_pages_docker_networks_alias: "{{ error_pages_name }}"-error_pages_docker_networks_default: []-error_pages_docker_networks_custom: []-error_pages_docker_networks: "{{ docker_networks_common- + 
error_pages_docker_networks_default- + error_pages_docker_networks_custom }}"--# Capabilities-error_pages_docker_capabilities_default: []-error_pages_docker_capabilities_custom: []-error_pages_docker_capabilities: "{{ error_pages_docker_capabilities_default- + error_pages_docker_capabilities_custom }}"--# Security Opts-error_pages_docker_security_opts_default: []-error_pages_docker_security_opts_custom: []-error_pages_docker_security_opts: "{{ error_pages_docker_security_opts_default- + error_pages_docker_security_opts_custom }}"+error_pages_role_docker_networks_alias: "{{ error_pages_name }}"+error_pages_role_docker_networks_default: []+error_pages_role_docker_networks_custom: []+error_pages_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='error_pages')+ + lookup('role_var', '_docker_networks_custom', role='error_pages') }}" # Restart Policy-error_pages_docker_restart_policy: unless-stopped+error_pages_role_docker_restart_policy: unless-stopped # State-error_pages_docker_state: started+error_pages_role_docker_state: started
modified
roles/error_pages/tasks/main.yml
@@ -9,13 +9,13 @@ --- - name: Check if legacy folder exists ansible.builtin.stat:- path: "/opt/error_pages"+ path: "{{ server_appdata_path }}/error_pages" register: error_pages_legacy_folder - name: Migrate error pages directory migrate_folder:- legacy_path: /opt/error_pages- new_path: /opt/error-pages+ legacy_path: "{{ server_appdata_path }}/error_pages"+ new_path: "{{ server_appdata_path }}/error-pages" owner: "{{ user.name }}" group: "{{ user.name }}" mode: '0775'@@ -24,12 +24,12 @@ - name: Check if folder already exists ansible.builtin.stat:- path: "/opt/error-pages"+ path: "{{ server_appdata_path }}/error-pages" register: error_pages_folder - name: Create directory ansible.builtin.file:- path: "/opt/error-pages"+ path: "{{ server_appdata_path }}/error-pages" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -43,12 +43,12 @@ user: "{{ uid }}:{{ gid }}" command: "build --config-file ./error-pages.yml /out" volumes:- - "/opt/error-pages:/out:rw"+ - "{{ server_appdata_path }}/error-pages:/out:rw" networks: - name: saltbox container_default_behavior: compatibility tls_hostname: localhost- auto_remove: yes+ auto_remove: true state: started pull: true when: (not error_pages_folder.stat.exists)
modified
roles/glances/tasks/main.yml
@@ -17,13 +17,6 @@ glances_python: "3.12" glances_path: "/srv/glances" glances_venv_path: "/srv/glances/venv"--- name: "Execute Python role"- ansible.builtin.include_role:- name: "python"- vars:- python_version: "3.10"- when: ansible_distribution_version is version('22.04', '!=') - name: Delete venv folder ansible.builtin.file:
modified
roles/gluetun/defaults/main.yml
@@ -1,10 +1,10 @@ ##########################################################################-# Title: Sandbox: Gluetun | Default Variables #-# Author(s): owine #-# URL: https://github.com/saltyorg/Sandbox #-# -- #+# Title: Saltbox: Gluetun | Default Variables #+# Author(s): owine #+# URL: https://github.com/saltyorg/Saltbox #+# -- # ##########################################################################-# GNU General Public License v3.0 #+# GNU General Public License v3.0 # ########################################################################## --- ################################@@ -16,162 +16,130 @@ ################################ # Settings ################################+ # These variables map to the appropriate Docker ENVs # Review the gluetun wiki (https://github.com/qdm12/gluetun/wiki)-gluetun_vpn_service_provider: ""-gluetun_vpn_type: ""-gluetun_openvpn_custom_config: ""-gluetun_openvpn_endpoint_ip: ""-gluetun_openvpn_endpoint_port: ""-gluetun_openvpn_user: ""-gluetun_openvpn_password: ""-gluetun_openvpn_key_passphrase: ""-gluetun_vpn_endpoint_ip: ""-gluetun_vpn_endpoint_port: ""-gluetun_wireguard_endpoint_ip: ""-gluetun_wireguard_endpoint_port: ""-gluetun_wireguard_mtu: ""-gluetun_wireguard_public_key: ""-gluetun_wireguard_private_key: ""-gluetun_wireguard_preshared_key: ""-gluetun_wireguard_addresses: ""-gluetun_server_countries: ""-gluetun_server_cities: ""-gluetun_server_hostnames: ""-gluetun_server_names: ""-gluetun_server_regions: ""-gluetun_firewall_vpn_input_ports: ""-gluetun_firewall_input_ports: ""-gluetun_firewall_outbound_subnets: ""-gluetun_docker_resolver: true+gluetun_role_vpn_service_provider: ""+gluetun_role_vpn_type: ""+gluetun_role_openvpn_custom_config: ""+gluetun_role_openvpn_endpoint_ip: ""+gluetun_role_openvpn_endpoint_port: ""+gluetun_role_openvpn_user: ""+gluetun_role_openvpn_password: ""+gluetun_role_openvpn_key_passphrase: ""+gluetun_role_vpn_endpoint_ip: ""+gluetun_role_vpn_endpoint_port: 
""+gluetun_role_wireguard_endpoint_ip: ""+gluetun_role_wireguard_endpoint_port: ""+gluetun_role_wireguard_mtu: ""+gluetun_role_wireguard_public_key: ""+gluetun_role_wireguard_private_key: ""+gluetun_role_wireguard_preshared_key: ""+gluetun_role_wireguard_addresses: ""+gluetun_role_server_countries: ""+gluetun_role_server_cities: ""+gluetun_role_server_hostnames: ""+gluetun_role_server_names: ""+gluetun_role_server_regions: ""+gluetun_role_firewall_vpn_input_ports: ""+gluetun_role_firewall_input_ports: ""+gluetun_role_firewall_outbound_subnets: ""+gluetun_role_docker_resolver: true ################################ # Paths ################################ -gluetun_paths_folder: "{{ gluetun_name }}"-gluetun_paths_location: "{{ server_appdata_path }}/{{ gluetun_paths_folder }}"-gluetun_paths_folders_list:- - "{{ gluetun_paths_location }}"+gluetun_role_paths_folder: "{{ gluetun_name }}"+gluetun_role_paths_location: "{{ server_appdata_path }}/{{ gluetun_role_paths_folder }}"+gluetun_role_paths_folders_list:+ - "{{ gluetun_role_paths_location }}" ################################ # Docker ################################ # Container-gluetun_docker_container: "{{ gluetun_name }}"+gluetun_role_docker_container: "{{ gluetun_name }}" # Image-gluetun_docker_image_pull: true-gluetun_docker_image_repo: "qmcgaw/gluetun"-gluetun_docker_image_tag: "v3"-gluetun_docker_image: "{{ lookup('vars', gluetun_name + '_docker_image_repo', default=gluetun_docker_image_repo)- + ':' + lookup('vars', gluetun_name + '_docker_image_tag', default=gluetun_docker_image_tag) }}"--# Ports-gluetun_docker_ports_defaults: []-gluetun_docker_ports_custom: []-gluetun_docker_ports: "{{ lookup('vars', gluetun_name + '_docker_ports_defaults', default=gluetun_docker_ports_defaults)- + lookup('vars', gluetun_name + '_docker_ports_custom', default=gluetun_docker_ports_custom) }}"+gluetun_role_docker_image_pull: true+gluetun_role_docker_image_repo: "qmcgaw/gluetun"+gluetun_role_docker_image_tag: 
"v3"+gluetun_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='gluetun') }}:{{ lookup('role_var', '_docker_image_tag', role='gluetun') }}" # Envs-gluetun_docker_envs_default:- DNS_KEEP_NAMESERVER: "{{ 'on' if lookup('vars', gluetun_name + '_docker_resolver', default=gluetun_docker_resolver) else 'off' }}"- FIREWALL_INPUT_PORTS: "{{ lookup('vars', gluetun_name + '_firewall_input_ports', default='') if (lookup('vars', gluetun_name + '_firewall_input_ports', default='') | length > 0) else omit }}"- FIREWALL_OUTBOUND_SUBNETS: "{{ lookup('vars', gluetun_name + '_firewall_outbound_subnets', default='') if (lookup('vars', gluetun_name + '_firewall_outbound_subnets', default='') | length > 0) else omit }}"- FIREWALL_VPN_INPUT_PORTS: "{{ lookup('vars', gluetun_name + '_firewall_vpn_input_ports', default='') if (lookup('vars', gluetun_name + '_firewall_vpn_input_ports', default='') | length > 0) else omit }}"+gluetun_role_docker_envs_default:+ DNS_KEEP_NAMESERVER: "{{ 'on' if lookup('role_var', '_docker_resolver', role='gluetun') else 'off' }}"+ FIREWALL_INPUT_PORTS: "{{ lookup('role_var', '_firewall_input_ports', role='gluetun') if (lookup('role_var', '_firewall_input_ports', role='gluetun') | length > 0) else omit }}"+ FIREWALL_OUTBOUND_SUBNETS: "{{ lookup('role_var', '_firewall_outbound_subnets', role='gluetun') if (lookup('role_var', '_firewall_outbound_subnets', role='gluetun') | length > 0) else omit }}"+ FIREWALL_VPN_INPUT_PORTS: "{{ lookup('role_var', '_firewall_vpn_input_ports', role='gluetun') if (lookup('role_var', '_firewall_vpn_input_ports', role='gluetun') | length > 0) else omit }}" HTTPPROXY: "on" HTTPPROXY_STEALTH: "on"- OPENVPN_CUSTOM_CONFIG: "{{ lookup('vars', gluetun_name + '_openvpn_custom_config', default='') if (lookup('vars', gluetun_name + '_openvpn_custom_config', default='') | length > 0) else omit }}"- OPENVPN_ENDPOINT_IP: "{{ lookup('vars', gluetun_name + '_openvpn_endpoint_ip', default='') if (lookup('vars', gluetun_name + 
'_openvpn_endpoint_ip', default='') | length > 0) else omit }}"- OPENVPN_ENDPOINT_PORT: "{{ lookup('vars', gluetun_name + '_openvpn_endpoint_port', default='') if (lookup('vars', gluetun_name + '_openvpn_endpoint_port', default='') | length > 0) else omit }}"- OPENVPN_KEY_PASSPHRASE: "{{ lookup('vars', gluetun_name + '_openvpn_key_passphrase', default='') if (lookup('vars', gluetun_name + '_openvpn_key_passphrase', default='') | length > 0) else omit }}"- OPENVPN_PASSWORD: "{{ lookup('vars', gluetun_name + '_openvpn_password', default='') if (lookup('vars', gluetun_name + '_openvpn_password', default='') | length > 0) else omit }}"- OPENVPN_USER: "{{ lookup('vars', gluetun_name + '_openvpn_user', default='') if (lookup('vars', gluetun_name + '_openvpn_user', default='') | length > 0) else omit }}"+ OPENVPN_CUSTOM_CONFIG: "{{ lookup('role_var', '_openvpn_custom_config', role='gluetun') if (lookup('role_var', '_openvpn_custom_config', role='gluetun') | length > 0) else omit }}"+ OPENVPN_ENDPOINT_IP: "{{ lookup('role_var', '_openvpn_endpoint_ip', role='gluetun') if (lookup('role_var', '_openvpn_endpoint_ip', role='gluetun') | length > 0) else omit }}"+ OPENVPN_ENDPOINT_PORT: "{{ lookup('role_var', '_openvpn_endpoint_port', role='gluetun') if (lookup('role_var', '_openvpn_endpoint_port', role='gluetun') | length > 0) else omit }}"+ OPENVPN_KEY_PASSPHRASE: "{{ lookup('role_var', '_openvpn_key_passphrase', role='gluetun') if (lookup('role_var', '_openvpn_key_passphrase', role='gluetun') | length > 0) else omit }}"+ OPENVPN_PASSWORD: "{{ lookup('role_var', '_openvpn_password', role='gluetun') if (lookup('role_var', '_openvpn_password', role='gluetun') | length > 0) else omit }}"+ OPENVPN_USER: "{{ lookup('role_var', '_openvpn_user', role='gluetun') if (lookup('role_var', '_openvpn_user', role='gluetun') | length > 0) else omit }}" PGID: "{{ gid }}" PUID: "{{ uid }}"- SERVER_CITIES: "{{ lookup('vars', gluetun_name + '_server_cities', default='') if (lookup('vars', 
gluetun_name + '_server_cities', default='') | length > 0) else omit }}"- SERVER_COUNTRIES: "{{ lookup('vars', gluetun_name + '_server_countries', default='') if (lookup('vars', gluetun_name + '_server_countries', default='') | length > 0) else omit }}"- SERVER_HOSTNAMES: "{{ lookup('vars', gluetun_name + '_server_hostnames', default='') if (lookup('vars', gluetun_name + '_server_hostnames', default='') | length > 0) else omit }}"- SERVER_NAMES: "{{ lookup('vars', gluetun_name + '_server_names', default='') if (lookup('vars', gluetun_name + '_server_names', default='') | length > 0) else omit }}"- SERVER_REGIONS: "{{ lookup('vars', gluetun_name + '_server_regions', default='') if (lookup('vars', gluetun_name + '_server_regions', default='') | length > 0) else omit }}"+ SERVER_CITIES: "{{ lookup('role_var', '_server_cities', role='gluetun') if (lookup('role_var', '_server_cities', role='gluetun') | length > 0) else omit }}"+ SERVER_COUNTRIES: "{{ lookup('role_var', '_server_countries', role='gluetun') if (lookup('role_var', '_server_countries', role='gluetun') | length > 0) else omit }}"+ SERVER_HOSTNAMES: "{{ lookup('role_var', '_server_hostnames', role='gluetun') if (lookup('role_var', '_server_hostnames', role='gluetun') | length > 0) else omit }}"+ SERVER_NAMES: "{{ lookup('role_var', '_server_names', role='gluetun') if (lookup('role_var', '_server_names', role='gluetun') | length > 0) else omit }}"+ SERVER_REGIONS: "{{ lookup('role_var', '_server_regions', role='gluetun') if (lookup('role_var', '_server_regions', role='gluetun') | length > 0) else omit }}" TZ: "{{ tz }}"- VPN_ENDPOINT_IP: "{{ lookup('vars', gluetun_name + '_vpn_endpoint_ip', default='') if (lookup('vars', gluetun_name + '_vpn_endpoint_ip', default='') | length > 0) else omit }}"- VPN_ENDPOINT_PORT: "{{ lookup('vars', gluetun_name + '_vpn_endpoint_port', default='') if (lookup('vars', gluetun_name + '_vpn_endpoint_port', default='') | length > 0) else omit }}"- VPN_SERVICE_PROVIDER: "{{ 
lookup('vars', gluetun_name + '_vpn_service_provider', default='') if (lookup('vars', gluetun_name + '_vpn_service_provider', default='') | length > 0) else omit }}"- VPN_TYPE: "{{ lookup('vars', gluetun_name + '_vpn_type', default='') if (lookup('vars', gluetun_name + '_vpn_type', default='') | length > 0) else omit }}"- WIREGUARD_ADDRESSES: "{{ lookup('vars', gluetun_name + '_wireguard_addresses', default='') if (lookup('vars', gluetun_name + '_wireguard_addresses', default='') | length > 0) else omit }}"- WIREGUARD_ENDPOINT_IP: "{{ lookup('vars', gluetun_name + '_wireguard_endpoint_ip', default='') if (lookup('vars', gluetun_name + '_wireguard_endpoint_ip', default='') | length > 0) else omit }}"- WIREGUARD_ENDPOINT_PORT: "{{ lookup('vars', gluetun_name + '_wireguard_endpoint_port', default='') if (lookup('vars', gluetun_name + '_wireguard_endpoint_port', default='') | length > 0) else omit }}"- WIREGUARD_MTU: "{{ lookup('vars', gluetun_name + '_wireguard_mtu', default='') if (lookup('vars', gluetun_name + '_wireguard_mtu', default='') | length > 0) else omit }}"- WIREGUARD_PRESHARED_KEY: "{{ lookup('vars', gluetun_name + '_wireguard_preshared_key', default='') if (lookup('vars', gluetun_name + '_wireguard_preshared_key', default='') | length > 0) else omit }}"- WIREGUARD_PRIVATE_KEY: "{{ lookup('vars', gluetun_name + '_wireguard_private_key', default='') if (lookup('vars', gluetun_name + '_wireguard_private_key', default='') | length > 0) else omit }}"- WIREGUARD_PUBLIC_KEY: "{{ lookup('vars', gluetun_name + '_wireguard_public_key', default='') if (lookup('vars', gluetun_name + '_wireguard_public_key', default='') | length > 0) else omit }}"-gluetun_docker_envs_custom: {}-gluetun_docker_envs: "{{ lookup('vars', gluetun_name + '_docker_envs_default', default=gluetun_docker_envs_default)- | combine(lookup('vars', gluetun_name + '_docker_envs_custom', default=gluetun_docker_envs_custom)) }}"--# Commands-gluetun_docker_commands_default: 
[]-gluetun_docker_commands_custom: []-gluetun_docker_commands: "{{ lookup('vars', gluetun_name + '_docker_commands_default', default=gluetun_docker_commands_default)- + lookup('vars', gluetun_name + '_docker_commands_custom', default=gluetun_docker_commands_custom) }}"+ VPN_ENDPOINT_IP: "{{ lookup('role_var', '_vpn_endpoint_ip', role='gluetun') if (lookup('role_var', '_vpn_endpoint_ip', role='gluetun') | length > 0) else omit }}"+ VPN_ENDPOINT_PORT: "{{ lookup('role_var', '_vpn_endpoint_port', role='gluetun') if (lookup('role_var', '_vpn_endpoint_port', role='gluetun') | length > 0) else omit }}"+ VPN_SERVICE_PROVIDER: "{{ lookup('role_var', '_vpn_service_provider', role='gluetun') if (lookup('role_var', '_vpn_service_provider', role='gluetun') | length > 0) else omit }}"+ VPN_TYPE: "{{ lookup('role_var', '_vpn_type', role='gluetun') if (lookup('role_var', '_vpn_type', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_ADDRESSES: "{{ lookup('role_var', '_wireguard_addresses', role='gluetun') if (lookup('role_var', '_wireguard_addresses', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_ENDPOINT_IP: "{{ lookup('role_var', '_wireguard_endpoint_ip', role='gluetun') if (lookup('role_var', '_wireguard_endpoint_ip', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_ENDPOINT_PORT: "{{ lookup('role_var', '_wireguard_endpoint_port', role='gluetun') if (lookup('role_var', '_wireguard_endpoint_port', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_MTU: "{{ lookup('role_var', '_wireguard_mtu', role='gluetun') if (lookup('role_var', '_wireguard_mtu', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_PRESHARED_KEY: "{{ lookup('role_var', '_wireguard_preshared_key', role='gluetun') if (lookup('role_var', '_wireguard_preshared_key', role='gluetun') | length > 0) else omit }}"+ WIREGUARD_PRIVATE_KEY: "{{ lookup('role_var', '_wireguard_private_key', role='gluetun') if (lookup('role_var', '_wireguard_private_key', role='gluetun') | length > 0) else omit 
}}"+ WIREGUARD_PUBLIC_KEY: "{{ lookup('role_var', '_wireguard_public_key', role='gluetun') if (lookup('role_var', '_wireguard_public_key', role='gluetun') | length > 0) else omit }}"+gluetun_role_docker_envs_custom: {}+gluetun_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='gluetun')+ | combine(lookup('role_var', '_docker_envs_custom', role='gluetun')) }}" # Volumes-gluetun_docker_volumes_global: false-gluetun_docker_volumes_default:- - "{{ gluetun_paths_location }}:/gluetun"-gluetun_docker_volumes_custom: []-gluetun_docker_volumes: "{{ lookup('vars', gluetun_name + '_docker_volumes_default', default=gluetun_docker_volumes_default)- + lookup('vars', gluetun_name + '_docker_volumes_custom', default=gluetun_docker_volumes_custom) }}"--# Devices-gluetun_docker_devices_default: []-gluetun_docker_devices_custom: []-gluetun_docker_devices: "{{ lookup('vars', gluetun_name + '_docker_devices_default', default=gluetun_docker_devices_default)- + lookup('vars', gluetun_name + '_docker_devices_custom', default=gluetun_docker_devices_custom) }}"--# Hosts-gluetun_docker_hosts_default: {}-gluetun_docker_hosts_custom: {}-gluetun_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', gluetun_name + '_docker_hosts_default', default=gluetun_docker_hosts_default))- | combine(lookup('vars', gluetun_name + '_docker_hosts_custom', default=gluetun_docker_hosts_custom)) }}"+gluetun_role_docker_volumes_global: false+gluetun_role_docker_volumes_default:+ - "{{ gluetun_role_paths_location }}:/gluetun"+gluetun_role_docker_volumes_custom: []+gluetun_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='gluetun')+ + lookup('role_var', '_docker_volumes_custom', role='gluetun') }}" # Labels-gluetun_docker_labels_default:+gluetun_role_docker_labels_default: com.centurylinklabs.watchtower.enable: "false"-gluetun_docker_labels_custom: {}-gluetun_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', gluetun_name + 
'_docker_labels_default', default=gluetun_docker_labels_default))- | combine(lookup('vars', gluetun_name + '_docker_labels_custom', default=gluetun_docker_labels_custom)) }}"+gluetun_role_docker_labels_custom: {}+gluetun_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='gluetun')+ | combine(lookup('role_var', '_docker_labels_custom', role='gluetun')) }}" # Hostname-gluetun_docker_hostname: "{{ gluetun_name }}"+gluetun_role_docker_hostname: "{{ gluetun_name }}" # Networks-gluetun_docker_networks_alias: "{{ gluetun_name }}"-gluetun_docker_networks_default: []-gluetun_docker_networks_custom: []-gluetun_docker_networks: "{{ docker_networks_common- + lookup('vars', gluetun_name + '_docker_networks_default', default=gluetun_docker_networks_default)- + lookup('vars', gluetun_name + '_docker_networks_custom', default=gluetun_docker_networks_custom) }}"+gluetun_role_docker_networks_alias: "{{ gluetun_name }}"+gluetun_role_docker_networks_default: []+gluetun_role_docker_networks_custom: []+gluetun_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='gluetun')+ + lookup('role_var', '_docker_networks_custom', role='gluetun') }}" # Capabilities-gluetun_docker_capabilities_default:+gluetun_role_docker_capabilities_default: - NET_ADMIN-gluetun_docker_capabilities_custom: []-gluetun_docker_capabilities: "{{ lookup('vars', gluetun_name + '_docker_capabilities_default', default=gluetun_docker_capabilities_default)- + lookup('vars', gluetun_name + '_docker_capabilities_custom', default=gluetun_docker_capabilities_custom) }}"--# Security Opts-gluetun_docker_security_opts_default: []-gluetun_docker_security_opts_custom: []-gluetun_docker_security_opts: "{{ lookup('vars', gluetun_name + '_docker_security_opts_default', default=gluetun_docker_security_opts_default)- + lookup('vars', gluetun_name + '_docker_security_opts_custom', default=gluetun_docker_security_opts_custom) 
}}"+gluetun_role_docker_capabilities_custom: []+gluetun_role_docker_capabilities: "{{ lookup('role_var', '_docker_capabilities_default', role='gluetun')+ + lookup('role_var', '_docker_capabilities_custom', role='gluetun') }}" # Restart Policy-gluetun_docker_restart_policy: unless-stopped+gluetun_role_docker_restart_policy: unless-stopped # State-gluetun_docker_state: started+gluetun_role_docker_state: started
modified
roles/gluetun/tasks/main.yml
@@ -1,7 +1,7 @@ ##########################################################################-# Title: Sandbox: Gluetun #+# Title: Saltbox: Gluetun # # Author(s): owine #-# URL: https://github.com/saltyorg/Sandbox #+# URL: https://github.com/saltyorg/Saltbox # # -- # ########################################################################## # GNU General Public License v3.0 #
modified
roles/gluetun/tasks/main2.yml
@@ -1,7 +1,7 @@ ##########################################################################-# Title: Sandbox: Gluetun #+# Title: Saltbox: Gluetun # # Author(s): owine #-# URL: https://github.com/saltyorg/Sandbox #+# URL: https://github.com/saltyorg/Saltbox # # -- # ########################################################################## # GNU General Public License v3.0 #
modified
roles/grafana/defaults/main.yml
@@ -1,9 +1,9 @@ #########################################################################-# Title: Saltbox: Grafana Role | Default Variables #-# Author(s): desimaniac, salty #-# URL: https://github.com/saltyorg/Saltbox #+# Title: Saltbox: Grafana | Default Variables #+# Author(s): desimaniac, salty #+# URL: https://github.com/saltyorg/Saltbox # #########################################################################-# GNU General Public License v3.0 #+# GNU General Public License v3.0 # ######################################################################### --- ################################@@ -13,144 +13,101 @@ grafana_name: grafana ################################+# Settings+################################++# Comma separated list of plugins+grafana_role_plugins: ""++################################ # Paths ################################ -grafana_paths_folder: "{{ grafana_name }}"-grafana_paths_location: "{{ server_appdata_path }}/{{ grafana_paths_folder }}"-grafana_paths_folders_list:- - "{{ grafana_paths_location }}"+grafana_role_paths_folder: "{{ grafana_name }}"+grafana_role_paths_location: "{{ server_appdata_path }}/{{ grafana_role_paths_folder }}"+grafana_role_paths_folders_list:+ - "{{ grafana_role_paths_location }}" ################################ # Web ################################ -grafana_web_subdomain: "{{ grafana_name }}"-grafana_web_domain: "{{ user.domain }}"-grafana_web_port: "3000"-grafana_web_url: "{{ 'https://' + (grafana_web_subdomain + '.' + grafana_web_domain- if (grafana_web_subdomain | length > 0)- else grafana_web_domain) }}"+grafana_role_web_subdomain: "{{ grafana_name }}"+grafana_role_web_domain: "{{ user.domain }}"+grafana_role_web_port: "3000"+grafana_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='grafana') + '.' 
+ lookup('role_var', '_web_domain', role='grafana')+ if (lookup('role_var', '_web_subdomain', role='grafana') | length > 0)+ else lookup('role_var', '_web_domain', role='grafana')) }}" ################################ # DNS ################################ -grafana_dns_record: "{{ grafana_web_subdomain }}"-grafana_dns_zone: "{{ grafana_web_domain }}"-grafana_dns_proxy: "{{ dns.proxied }}"+grafana_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='grafana') }}"+grafana_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='grafana') }}"+grafana_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -grafana_traefik_sso_middleware: ""-grafana_traefik_middleware_default: "{{ traefik_default_middleware }}"-grafana_traefik_middleware_custom: ""-grafana_traefik_certresolver: "{{ traefik_default_certresolver }}"-grafana_traefik_enabled: true-grafana_traefik_api_enabled: false-grafana_traefik_api_endpoint: ""--################################-# Plugins-################################--# Comma separated list of plugins-grafana_plugins: ""+grafana_role_traefik_sso_middleware: ""+grafana_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+grafana_role_traefik_middleware_custom: ""+grafana_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+grafana_role_traefik_enabled: true+grafana_role_traefik_api_enabled: false+grafana_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-grafana_docker_container: "{{ grafana_name }}"+grafana_role_docker_container: "{{ grafana_name }}" # Image-grafana_docker_image_pull: true-grafana_docker_image_tag: "latest"-grafana_docker_image: "grafana/grafana:{{ grafana_docker_image_tag }}"--# Ports-grafana_docker_ports_defaults: []-grafana_docker_ports_custom: []-grafana_docker_ports: "{{ grafana_docker_ports_defaults- + grafana_docker_ports_custom }}"+grafana_role_docker_image_pull: 
true+grafana_role_docker_image_repo: "grafana/grafana"+grafana_role_docker_image_tag: "latest"+grafana_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='grafana') }}:{{ lookup('role_var', '_docker_image_tag', role='grafana') }}" # Envs-grafana_docker_envs_default:+grafana_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"- GF_SERVER_ROOT_URL: "https://grafana.{{ user.domain }}"+ GF_SERVER_ROOT_URL: "{{ lookup('role_var', '_web_url', role='grafana') }}" GF_SECURITY_ADMIN_USER: "{{ user.name }}" GF_SECURITY_ADMIN_PASSWORD: "{{ user.pass }}"- GF_INSTALL_PLUGINS: "{{ grafana_plugins }}"-grafana_docker_envs_custom: {}-grafana_docker_envs: "{{ grafana_docker_envs_default- | combine(grafana_docker_envs_custom) }}"--# Commands-grafana_docker_commands_default: []-grafana_docker_commands_custom: []-grafana_docker_commands: "{{ grafana_docker_commands_default- + grafana_docker_commands_custom }}"+ GF_INSTALL_PLUGINS: "{{ lookup('role_var', '_plugins', role='grafana') }}"+grafana_role_docker_envs_custom: {}+grafana_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='grafana')+ | combine(lookup('role_var', '_docker_envs_custom', role='grafana')) }}" # Volumes-grafana_docker_volumes_default:- - "{{ grafana_paths_location }}:/var/lib/grafana"-grafana_docker_volumes_custom: []-grafana_docker_volumes: "{{ grafana_docker_volumes_default- + grafana_docker_volumes_custom }}"--# Devices-grafana_docker_devices_default: []-grafana_docker_devices_custom: []-grafana_docker_devices: "{{ grafana_docker_devices_default- + grafana_docker_devices_custom }}"--# Hosts-grafana_docker_hosts_default: {}-grafana_docker_hosts_custom: {}-grafana_docker_hosts: "{{ docker_hosts_common- | combine(grafana_docker_hosts_default)- | combine(grafana_docker_hosts_custom) }}"--# Labels-grafana_docker_labels_default: {}-grafana_docker_labels_custom: {}-grafana_docker_labels: "{{ docker_labels_common- | combine(grafana_docker_labels_default)- | 
combine(grafana_docker_labels_custom) }}"+grafana_role_docker_volumes_default:+ - "{{ grafana_role_paths_location }}:/var/lib/grafana"+grafana_role_docker_volumes_custom: []+grafana_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='grafana')+ + lookup('role_var', '_docker_volumes_custom', role='grafana') }}" # Hostname-grafana_docker_hostname: "{{ grafana_name }}"+grafana_role_docker_hostname: "{{ grafana_name }}" # Networks-grafana_docker_networks_alias: "{{ grafana_name }}"-grafana_docker_networks_default: []-grafana_docker_networks_custom: []-grafana_docker_networks: "{{ docker_networks_common- + grafana_docker_networks_default- + grafana_docker_networks_custom }}"--# Capabilities-grafana_docker_capabilities_default: []-grafana_docker_capabilities_custom: []-grafana_docker_capabilities: "{{ grafana_docker_capabilities_default- + grafana_docker_capabilities_custom }}"--# Security Opts-grafana_docker_security_opts_default: []-grafana_docker_security_opts_custom: []-grafana_docker_security_opts: "{{ grafana_docker_security_opts_default- + grafana_docker_security_opts_custom }}"+grafana_role_docker_networks_alias: "{{ grafana_name }}"+grafana_role_docker_networks_default: []+grafana_role_docker_networks_custom: []+grafana_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='grafana')+ + lookup('role_var', '_docker_networks_custom', role='grafana') }}" # Restart Policy-grafana_docker_restart_policy: unless-stopped+grafana_role_docker_restart_policy: unless-stopped # State-grafana_docker_state: started+grafana_role_docker_state: started # User-grafana_docker_user: "{{ uid }}:{{ gid }}"+grafana_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/grafana/tasks/main.yml
@@ -9,9 +9,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/hetzner/tasks/main.yml
@@ -8,7 +8,7 @@ ######################################################################### --- - name: "Hetzner Tasks"- ansible.builtin.import_tasks: "subtasks/hetzner.yml"+ ansible.builtin.include_tasks: "subtasks/hetzner.yml" - name: "Restart Tasks" ansible.builtin.include_tasks: "subtasks/restart.yml"
modified
roles/hetzner/tasks/subtasks/hetzner.yml
@@ -10,7 +10,7 @@ - name: Hetzner | Install 'pciutils' ansible.builtin.apt: name: pciutils- state: present+ state: latest - name: Hetzner | Fetch PCI info ansible.builtin.shell: "lspci -v -s $(lspci | grep -E '.*VGA.*Intel.*' | cut -d' ' -f 1) 2>/dev/null || :"
modified
roles/hetzner_nfs/tasks/hetzner_nfs_mount.yml
@@ -15,7 +15,7 @@ owner: "{{ user.name }}" group: "{{ user.name }}" with_items:- - /opt/hetzner_nfs+ - "{{ server_appdata_path }}/hetzner_nfs" - /mnt/feeder - name: Import 'hosts.deny' file@@ -38,7 +38,7 @@ - name: Install nfs requirements ansible.builtin.apt:- state: present+ state: latest name: - rpcbind - nfs-common@@ -46,7 +46,7 @@ - name: Import 'init_vlan.sh' file ansible.builtin.template: src: client/init_vlan.sh.j2- dest: /opt/hetzner_nfs/init_vlan.sh+ dest: "{{ server_appdata_path }}/hetzner_nfs/init_vlan.sh" owner: '{{ user.name }}' group: '{{ user.name }}' mode: "0775"@@ -82,7 +82,7 @@ regexp: '(^After\=network-online\.target).*' line: '\1 init_vlan.service' state: present- backrefs: yes+ backrefs: true when: mergerfs_service.stat.exists - name: Replace '/mnt/local' with '/mnt/feeder' in '{{ mergerfs_service_name }}'
modified
roles/hetzner_nfs/tasks/hetzner_nfs_server.yml
@@ -15,7 +15,7 @@ owner: "{{ user.name }}" group: "{{ user.name }}" with_items:- - /opt/hetzner_nfs+ - "{{ server_appdata_path }}/hetzner_nfs" - name: Import 'hosts.deny' file ansible.builtin.template:@@ -28,7 +28,7 @@ - name: Install nfs server ansible.builtin.apt:- state: present+ state: latest name: - rpcbind - nfs-kernel-server@@ -48,7 +48,7 @@ - name: Import 'init_vlan.sh' file ansible.builtin.template: src: server/init_vlan.sh.j2- dest: /opt/hetzner_nfs/init_vlan.sh+ dest: "{{ server_appdata_path }}/hetzner_nfs/init_vlan.sh" owner: '{{ user.name }}' group: '{{ user.name }}' mode: "0775"
modified
roles/hetzner_nfs/tasks/hetzner_nfs_server_uninstall.yml
@@ -35,7 +35,7 @@ - "/etc/systemd/system/nfs_vlan.service" - "/etc/exports" - "/etc/hosts.deny"- - "/opt/hetzner_nfs"+ - "{{ server_appdata_path }}/hetzner_nfs" - name: Reboot message ansible.builtin.debug:
modified
roles/hetzner_nfs/tasks/hetzner_nfs_unmount.yml
@@ -35,7 +35,7 @@ - "/etc/systemd/system/nfs_vlan.service" - "/etc/hosts.allow" - "/etc/hosts.deny"- - "/opt/hetzner_nfs"+ - "{{ server_appdata_path }}/hetzner_nfs" - "/mnt/feeder" # mergerfs edits@@ -46,7 +46,7 @@ regexp: '(^After\=.*)\sinit_vlan\.service\s(.*)' line: '\1 \2' state: present- backrefs: yes+ backrefs: true when: mergerfs_service.stat.exists - name: Replace '/mnt/feeder' with '/mnt/local' in '{{ mergerfs_service_name }}'
modified
roles/hetzner_nfs/tasks/services_stop.yml
@@ -18,7 +18,7 @@ - name: Get 'docker.service' state ansible.builtin.set_fact:- docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"+ docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}" when: docker_binary.stat.exists - name: Gather list of running Docker containers@@ -52,7 +52,7 @@ - name: Get '{{ mergerfs_service_name }}' state ansible.builtin.set_fact:- mergerfs_service_running: "{{ (services[mergerfs_service_name] is defined) and (services[mergerfs_service_name]['state'] == 'running') }}"+ mergerfs_service_running: "{{ (ansible_facts['services'][mergerfs_service_name] is defined) and (ansible_facts['services'][mergerfs_service_name]['state'] == 'running') }}" when: mergerfs_service.stat.exists - name: Stop existing '{{ mergerfs_service_name }}'
modified
roles/hetzner_nfs/templates/client/init_vlan.sh.j2
@@ -1,7 +1,7 @@ #!/bin/bash-sudo ip link add link {{ ansible_default_ipv4.interface }} name {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} type vlan id {{ hetzner_nfs.vlan_id }}-sudo ip link set {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} mtu 1400-sudo ip link set dev {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} up-sudo ip addr add 192.168.100.{{ hetzner_nfs.mount_client }}/24 brd 192.168.100.255 dev {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }}+sudo ip link add link {{ ansible_facts['default_ipv4']['interface'] }} name {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} type vlan id {{ hetzner_nfs.vlan_id }}+sudo ip link set {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} mtu 1400+sudo ip link set dev {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} up+sudo ip addr add 192.168.100.{{ hetzner_nfs.mount_client }}/24 brd 192.168.100.255 dev {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} sudo umount /mnt/feeder sudo mount 192.168.100.2:/mnt/local /mnt/feeder
modified
roles/hetzner_nfs/templates/client/nfs_vlan.service.j2
@@ -6,7 +6,7 @@ After=network.target network-online.target [Service]-ExecStart=/opt/hetzner_nfs/init_vlan.sh+ExecStart={{ server_appdata_path }}/hetzner_nfs/init_vlan.sh [Install] WantedBy=multi-user.target
modified
roles/hetzner_nfs/templates/server/init_vlan.sh.j2
@@ -1,5 +1,5 @@ #!/bin/bash-sudo ip link add link {{ ansible_default_ipv4.interface }} name {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} type vlan id {{ hetzner_nfs.vlan_id }}-sudo ip link set {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} mtu 1400-sudo ip link set dev {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }} up-sudo ip addr add 192.168.100.{{ hetzner_nfs.server }}/24 brd 192.168.100.255 dev {{ ansible_default_ipv4.interface }}.{{ hetzner_nfs.vlan_id }}+sudo ip link add link {{ ansible_facts['default_ipv4']['interface'] }} name {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} type vlan id {{ hetzner_nfs.vlan_id }}+sudo ip link set {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} mtu 1400+sudo ip link set dev {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }} up+sudo ip addr add 192.168.100.{{ hetzner_nfs.server }}/24 brd 192.168.100.255 dev {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_nfs.vlan_id }}
modified
roles/hetzner_nfs/templates/server/nfs_vlan.service.j2
@@ -6,7 +6,7 @@ After=network.target network-online.target [Service]-ExecStart=/opt/hetzner_nfs/init_vlan.sh+ExecStart={{ server_appdata_path }}/hetzner_nfs/init_vlan.sh [Install] WantedBy=multi-user.target
modified
roles/hetzner_vlan/tasks/main.yml
@@ -9,7 +9,7 @@ --- - name: Setup Netplan VLAN ansible.builtin.shell: |- yyq -i '.network.vlans += {"{{ ansible_default_ipv4.interface }}.{{ hetzner_vlan.vlan_id }}": {"id": {{ hetzner_vlan.vlan_id }}, "link": "{{ ansible_default_ipv4.interface }}", "mtu": 1400, "addresses": ["192.168.100.{{ hetzner_vlan.client_number }}/24"]}}' /etc/netplan/01-netcfg.yaml+ yyq -i '.network.vlans += {"{{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_vlan.vlan_id }}": {"id": {{ hetzner_vlan.vlan_id }}, "link": "{{ ansible_facts['default_ipv4']['interface'] }}", "mtu": 1400, "addresses": ["192.168.100.{{ hetzner_vlan.client_number }}/24"]}}' /etc/netplan/01-netcfg.yaml when: ('hetzner-vlan-deploy' in ansible_run_tags) - name: Remove Netplan VLAN@@ -22,5 +22,5 @@ when: hetzner_vlan_netplan_apply - name: Remove VLAN link- ansible.builtin.shell: ip link delete {{ ansible_default_ipv4.interface }}.{{ hetzner_vlan.vlan_id }}+ ansible.builtin.shell: ip link delete {{ ansible_facts['default_ipv4']['interface'] }}.{{ hetzner_vlan.vlan_id }} when: ('hetzner-vlan-remove' in ansible_run_tags) and hetzner_vlan_netplan_apply
modified
roles/jackett/defaults/main.yml
@@ -17,145 +17,108 @@ # Paths ################################ -jackett_paths_folder: "{{ jackett_name }}"-jackett_paths_location: "{{ server_appdata_path }}/{{ jackett_paths_folder }}"-jackett_paths_folders_list:- - "{{ jackett_paths_location }}"+jackett_role_paths_folder: "{{ jackett_name }}"+jackett_role_paths_location: "{{ server_appdata_path }}/{{ jackett_role_paths_folder }}"+jackett_role_paths_folders_list:+ - "{{ jackett_role_paths_location }}" ################################ # Web ################################ -jackett_web_subdomain: "{{ jackett_name }}"-jackett_web_domain: "{{ user.domain }}"-jackett_web_port: "9117"-jackett_web_url: "{{ 'https://' + (jackett_web_subdomain + '.' + jackett_web_domain- if (jackett_web_subdomain | length > 0)- else jackett_web_domain) }}"+jackett_role_web_subdomain: "{{ jackett_name }}"+jackett_role_web_domain: "{{ user.domain }}"+jackett_role_web_port: "9117"+jackett_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='jackett') + '.' 
+ lookup('role_var', '_web_domain', role='jackett')+ if (lookup('role_var', '_web_subdomain', role='jackett') | length > 0)+ else lookup('role_var', '_web_domain', role='jackett')) }}" ################################ # DNS ################################ -jackett_dns_record: "{{ jackett_web_subdomain }}"-jackett_dns_zone: "{{ jackett_web_domain }}"-jackett_dns_proxy: "{{ dns.proxied }}"+jackett_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='jackett') }}"+jackett_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='jackett') }}"+jackett_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -jackett_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-jackett_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', jackett_name + '_name', default=jackett_name)- if (jackett_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-jackett_traefik_middleware_custom: ""-jackett_traefik_certresolver: "{{ traefik_default_certresolver }}"-jackett_traefik_enabled: true-jackett_traefik_api_enabled: true-jackett_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/dl`)"+jackett_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+jackett_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + jackett_name+ if (lookup('role_var', '_themepark_enabled', role='jackett') and global_themepark_plugin_enabled)+ else '') }}"+jackett_role_traefik_middleware_custom: ""+jackett_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+jackett_role_traefik_enabled: true+jackett_role_traefik_api_enabled: true+jackett_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/dl`)" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-jackett_themepark_enabled: false-jackett_themepark_app: 
"jackett"-jackett_themepark_theme: "{{ global_themepark_theme }}"-jackett_themepark_domain: "{{ global_themepark_domain }}"-jackett_themepark_addons: []+jackett_role_themepark_enabled: false+jackett_role_themepark_app: "jackett"+jackett_role_themepark_theme: "{{ global_themepark_theme }}"+jackett_role_themepark_domain: "{{ global_themepark_domain }}"+jackett_role_themepark_addons: [] ################################ # Docker ################################ # Container-jackett_docker_container: "{{ jackett_name }}"+jackett_role_docker_container: "{{ jackett_name }}" # Image-jackett_docker_image_pull: true-jackett_docker_image_tag: "release"-jackett_docker_image: "ghcr.io/hotio/jackett:{{ jackett_docker_image_tag }}"--# Ports-jackett_docker_ports_defaults: []-jackett_docker_ports_custom: []-jackett_docker_ports: "{{ jackett_docker_ports_defaults- + jackett_docker_ports_custom }}"+jackett_role_docker_image_pull: true+jackett_role_docker_image_repo: "ghcr.io/hotio/jackett"+jackett_role_docker_image_tag: "release"+jackett_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='jackett') }}:{{ lookup('role_var', '_docker_image_tag', role='jackett') }}" # Envs-jackett_docker_envs_default:+jackett_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-jackett_docker_envs_custom: {}-jackett_docker_envs: "{{ jackett_docker_envs_default- | combine(jackett_docker_envs_custom) }}"--# Commands-jackett_docker_commands_default: []-jackett_docker_commands_custom: []-jackett_docker_commands: "{{ jackett_docker_commands_default- + jackett_docker_commands_custom }}"+jackett_role_docker_envs_custom: {}+jackett_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='jackett')+ | combine(lookup('role_var', '_docker_envs_custom', role='jackett')) }}" # Volumes-jackett_docker_volumes_default:- - "{{ jackett_paths_location }}:/config"-jackett_docker_volumes_custom: []-jackett_docker_volumes: "{{ jackett_docker_volumes_default- 
+ jackett_docker_volumes_custom }}"--# Devices-jackett_docker_devices_default: []-jackett_docker_devices_custom: []-jackett_docker_devices: "{{ jackett_docker_devices_default- + jackett_docker_devices_custom }}"--# Hosts-jackett_docker_hosts_default: {}-jackett_docker_hosts_custom: {}-jackett_docker_hosts: "{{ docker_hosts_common- | combine(jackett_docker_hosts_default)- | combine(jackett_docker_hosts_custom) }}"+jackett_role_docker_volumes_default:+ - "{{ jackett_role_paths_location }}:/config"+jackett_role_docker_volumes_custom: []+jackett_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='jackett')+ + lookup('role_var', '_docker_volumes_custom', role='jackett') }}" # Labels-jackett_docker_labels_default: {}-jackett_docker_labels_custom: {}-jackett_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', jackett_name + '_docker_labels_default', default=jackett_docker_labels_default))- | combine((traefik_themepark_labels- if (jackett_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', jackett_name + '_docker_labels_custom', default=jackett_docker_labels_custom)) }}"+jackett_role_docker_labels_default: {}+jackett_role_docker_labels_custom: {}+jackett_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='jackett')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='jackett') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='jackett')) }}" # Hostname-jackett_docker_hostname: "{{ jackett_name }}"+jackett_role_docker_hostname: "{{ jackett_name }}" # Networks-jackett_docker_networks_alias: "{{ jackett_name }}"-jackett_docker_networks_default: []-jackett_docker_networks_custom: []-jackett_docker_networks: "{{ docker_networks_common- + jackett_docker_networks_default- + jackett_docker_networks_custom }}"--# Capabilities-jackett_docker_capabilities_default: []-jackett_docker_capabilities_custom: 
[]-jackett_docker_capabilities: "{{ jackett_docker_capabilities_default- + jackett_docker_capabilities_custom }}"--# Security Opts-jackett_docker_security_opts_default: []-jackett_docker_security_opts_custom: []-jackett_docker_security_opts: "{{ jackett_docker_security_opts_default- + jackett_docker_security_opts_custom }}"+jackett_role_docker_networks_alias: "{{ jackett_name }}"+jackett_role_docker_networks_default: []+jackett_role_docker_networks_custom: []+jackett_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='jackett')+ + lookup('role_var', '_docker_networks_custom', role='jackett') }}" # Restart Policy-jackett_docker_restart_policy: unless-stopped+jackett_role_docker_restart_policy: unless-stopped # State-jackett_docker_state: started+jackett_role_docker_state: started
modified
roles/jackett/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/jellyfin/defaults/main.yml
@@ -1,10 +1,10 @@ #########################################################################-# Title: Saltbox: Jellyfin #-# Author(s): kowalski, desimaniac, owine #-# URL: https://github.com/saltyorg/Saltbox #-# -- #+# Title: Saltbox: Jellyfin | Default Variables #+# Author(s): kowalski, desimaniac, owine #+# URL: https://github.com/saltyorg/Saltbox #+# -- # #########################################################################-# GNU General Public License v3.0 #+# GNU General Public License v3.0 # ######################################################################### --- ################################@@ -17,184 +17,135 @@ # Paths ################################ -jellyfin_paths_folder: "{{ jellyfin_name }}"-jellyfin_paths_location: "{{ server_appdata_path }}/{{ jellyfin_paths_folder }}"-jellyfin_paths_transcodes_location: "{{ transcodes_path }}/{{ jellyfin_paths_folder }}"-jellyfin_paths_folders_list:- - "{{ jellyfin_paths_location }}"- - "{{ jellyfin_paths_location }}/data"- - "{{ jellyfin_paths_location }}/log"- - "{{ jellyfin_paths_location }}/cache"- - "{{ jellyfin_paths_transcodes_location }}"-jellyfin_paths_dlna_location: "{{ jellyfin_paths_location }}/dlna.xml"-jellyfin_paths_sys_xml_location: "{{ jellyfin_paths_location }}/system.xml"-jellyfin_paths_net_xml_location: "{{ jellyfin_paths_location }}/network.xml"-jellyfin_paths_xml_loct_old: "{{ jellyfin_paths_location }}/app/config/system.xml"+jellyfin_role_paths_folder: "{{ jellyfin_name }}"+jellyfin_role_paths_location: "{{ server_appdata_path }}/{{ jellyfin_role_paths_folder }}"+jellyfin_role_paths_transcodes_location: "{{ transcodes_path }}/{{ jellyfin_role_paths_folder }}"+jellyfin_role_paths_folders_list:+ - "{{ jellyfin_role_paths_location }}"+ - "{{ jellyfin_role_paths_location }}/data"+ - "{{ jellyfin_role_paths_location }}/log"+ - "{{ jellyfin_role_paths_location }}/cache"+ - "{{ jellyfin_role_paths_transcodes_location }}"+jellyfin_role_paths_dlna_location: "{{ jellyfin_role_paths_location 
}}/dlna.xml"+jellyfin_role_paths_sys_xml_location: "{{ jellyfin_role_paths_location }}/system.xml"+jellyfin_role_paths_net_xml_location: "{{ jellyfin_role_paths_location }}/network.xml"+jellyfin_role_paths_xml_location_old: "{{ jellyfin_role_paths_location }}/app/config/system.xml" ################################ # Web ################################ -jellyfin_web_subdomain: "{{ jellyfin_name }}"-jellyfin_web_domain: "{{ user.domain }}"-jellyfin_web_port: "8096"-jellyfin_web_url: "{{ 'https://' + (lookup('vars', jellyfin_name + '_web_subdomain', default=jellyfin_web_subdomain) + '.' + lookup('vars', jellyfin_name + '_web_domain', default=jellyfin_web_domain)- if (lookup('vars', jellyfin_name + '_web_subdomain', default=jellyfin_web_subdomain) | length > 0)- else lookup('vars', jellyfin_name + '_web_domain', default=jellyfin_web_domain)) }}"+jellyfin_role_web_subdomain: "{{ jellyfin_name }}"+jellyfin_role_web_domain: "{{ user.domain }}"+jellyfin_role_web_port: "8096"+jellyfin_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='jellyfin') + '.' 
+ lookup('role_var', '_web_domain', role='jellyfin')+ if (lookup('role_var', '_web_subdomain', role='jellyfin') | length > 0)+ else lookup('role_var', '_web_domain', role='jellyfin')) }}" ################################ # DNS ################################ -jellyfin_dns_record: "{{ lookup('vars', jellyfin_name + '_web_subdomain', default=jellyfin_web_subdomain) }}"-jellyfin_dns_zone: "{{ lookup('vars', jellyfin_name + '_web_domain', default=jellyfin_web_domain) }}"-jellyfin_dns_proxy: "{{ dns.proxied }}"+jellyfin_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='jellyfin') }}"+jellyfin_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='jellyfin') }}"+jellyfin_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -jellyfin_traefik_sso_middleware: ""-jellyfin_traefik_middleware_default: "{{ traefik_default_middleware }}"-jellyfin_traefik_middleware_custom: ""-jellyfin_traefik_certresolver: "{{ traefik_default_certresolver }}"-jellyfin_traefik_enabled: true-jellyfin_traefik_api_enabled: false-jellyfin_traefik_api_endpoint: ""-jellyfin_traefik_gzip_enabled: false+jellyfin_role_traefik_sso_middleware: ""+jellyfin_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+jellyfin_role_traefik_middleware_custom: ""+jellyfin_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+jellyfin_role_traefik_enabled: true+jellyfin_role_traefik_api_enabled: false+jellyfin_role_traefik_api_endpoint: ""+jellyfin_role_traefik_gzip_enabled: false ################################ # Config ################################ # System-jellyfin_system_settings_default:+jellyfin_role_system_settings_default: - { xpath: 'PublicPort', value: '80' } - { xpath: 'PublicHttpsPort', value: '443' } - { xpath: 'EnableFolderView', value: 'true' } - { xpath: 'QuickConnectAvailable', value: 'true' } - { xpath: 'EnableRemoteAccess', value: 'true' } - { xpath: 'ServerName', value: 'saltbox' 
}-jellyfin_system_settings_custom: []-jellyfin_system_settings_list: "{{ lookup('vars', jellyfin_name + '_system_settings_default', default=jellyfin_system_settings_default) + lookup('vars', jellyfin_name + '_system_settings_custom', default=jellyfin_system_settings_custom) }}"+jellyfin_role_system_settings_custom: []+jellyfin_role_system_settings_list: "{{ lookup('role_var', '_system_settings_default', role='jellyfin') + lookup('role_var', '_system_settings_custom', role='jellyfin') }}" # Network-jellyfin_network_settings_default:+jellyfin_role_network_settings_default: - { xpath: 'KnownProxies', value: 'traefik' } - { xpath: 'PublicPort', value: '80' } - { xpath: 'PublicHttpsPort', value: '443' }- - { xpath: 'PublishedServerUriBySubnet/string', value: 'external={{ jellyfin_web_url }}:443' }-jellyfin_network_settings_custom: []-jellyfin_network_settings_list: "{{ lookup('vars', jellyfin_name + '_network_settings_default', default=jellyfin_network_settings_default) + lookup('vars', jellyfin_name + '_network_settings_custom', default=jellyfin_network_settings_custom) }}"+ - { xpath: 'PublishedServerUriBySubnet/string', value: 'external={{ lookup("role_var", "_web_url", role="jellyfin") }}:443' }+jellyfin_role_network_settings_custom: []+jellyfin_role_network_settings_list: "{{ lookup('role_var', '_network_settings_default', role='jellyfin') + lookup('role_var', '_network_settings_custom', role='jellyfin') }}" ################################ # Docker ################################ # Container-jellyfin_docker_container: "{{ jellyfin_name }}"+jellyfin_role_docker_container: "{{ jellyfin_name }}" # Image-jellyfin_docker_image_pull: true-jellyfin_docker_image_repo: "ghcr.io/hotio/jellyfin"-jellyfin_docker_image_tag: "release"-jellyfin_docker_image: "{{ lookup('vars', jellyfin_name + '_docker_image_repo', default=jellyfin_docker_image_repo)- + ':' + lookup('vars', jellyfin_name + '_docker_image_tag', default=jellyfin_docker_image_tag) }}"--# 
Ports-jellyfin_docker_ports_defaults: []-jellyfin_docker_ports_custom: []-jellyfin_docker_ports: "{{ lookup('vars', jellyfin_name + '_docker_ports_defaults', default=jellyfin_docker_ports_defaults)- + lookup('vars', jellyfin_name + '_docker_ports_custom', default=jellyfin_docker_ports_custom) }}"+jellyfin_role_docker_image_pull: true+jellyfin_role_docker_image_repo: "ghcr.io/hotio/jellyfin"+jellyfin_role_docker_image_tag: "release"+jellyfin_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='jellyfin') }}:{{ lookup('role_var', '_docker_image_tag', role='jellyfin') }}" # Envs-jellyfin_docker_envs_default:+jellyfin_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}" DOTNET_USE_POLLING_FILE_WATCHER: "1"-jellyfin_docker_envs_custom: {}-jellyfin_docker_envs: "{{ lookup('vars', jellyfin_name + '_docker_envs_default', default=jellyfin_docker_envs_default)- | combine(lookup('vars', jellyfin_name + '_docker_envs_custom', default=jellyfin_docker_envs_custom)) }}"--# Commands-jellyfin_docker_commands_default: []-jellyfin_docker_commands_custom: []-jellyfin_docker_commands: "{{ lookup('vars', jellyfin_name + '_docker_commands_default', default=jellyfin_docker_commands_default)- + lookup('vars', jellyfin_name + '_docker_commands_custom', default=jellyfin_docker_commands_custom) }}"+jellyfin_role_docker_envs_custom: {}+jellyfin_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='jellyfin')+ | combine(lookup('role_var', '_docker_envs_custom', role='jellyfin')) }}" # Volumes-jellyfin_docker_volumes_default:- - "{{ jellyfin_paths_location }}:/config:rw"+jellyfin_role_docker_volumes_default:+ - "{{ jellyfin_role_paths_location }}:/config:rw" - "{{ server_appdata_path }}/scripts:/scripts" - "/dev/shm:/dev/shm"- - "{{ jellyfin_paths_transcodes_location }}:/transcode"-jellyfin_docker_volumes_legacy:+ - "{{ jellyfin_role_paths_transcodes_location }}:/transcode"+jellyfin_role_docker_volumes_legacy: - 
"/mnt/unionfs/Media:/data"-jellyfin_docker_volumes_custom: []-jellyfin_docker_volumes: "{{ lookup('vars', jellyfin_name + '_docker_volumes_default', default=jellyfin_docker_volumes_default)- + lookup('vars', jellyfin_name + '_docker_volumes_custom', default=jellyfin_docker_volumes_custom)- + (lookup('vars', jellyfin_name + '_docker_volumes_legacy', default=jellyfin_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"+jellyfin_role_docker_volumes_custom: []+jellyfin_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='jellyfin')+ + lookup('role_var', '_docker_volumes_custom', role='jellyfin')+ + (lookup('role_var', '_docker_volumes_legacy', role='jellyfin')+ if docker_legacy_volume+ else []) }}" # Mounts-jellyfin_docker_mounts_default:+jellyfin_role_docker_mounts_default: - target: /tmp type: tmpfs-jellyfin_docker_mounts_custom: []-jellyfin_docker_mounts: "{{ lookup('vars', jellyfin_name + '_docker_mounts_default', default=jellyfin_docker_mounts_default)- + lookup('vars', jellyfin_name + '_docker_mounts_custom', default=jellyfin_docker_mounts_custom) }}"--# Devices-jellyfin_docker_devices_default: []-jellyfin_docker_devices_custom: []-jellyfin_docker_devices: "{{ lookup('vars', jellyfin_name + '_docker_devices_default', default=jellyfin_docker_devices_default)- + lookup('vars', jellyfin_name + '_docker_devices_custom', default=jellyfin_docker_devices_custom) }}"--# Hosts-jellyfin_docker_hosts_default: {}-jellyfin_docker_hosts_custom: {}-jellyfin_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', jellyfin_name + '_docker_hosts_default', default=jellyfin_docker_hosts_default))- | combine(lookup('vars', jellyfin_name + '_docker_hosts_custom', default=jellyfin_docker_hosts_custom)) }}"--# Labels-jellyfin_docker_labels_default: {}-jellyfin_docker_labels_custom: {}-jellyfin_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', jellyfin_name + '_docker_labels_default', default=jellyfin_docker_labels_default))- | 
combine(lookup('vars', jellyfin_name + '_docker_labels_custom', default=jellyfin_docker_labels_custom)) }}"+jellyfin_role_docker_mounts_custom: []+jellyfin_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='jellyfin')+ + lookup('role_var', '_docker_mounts_custom', role='jellyfin') }}" # Hostname-jellyfin_docker_hostname: "{{ jellyfin_name }}"--# Network Mode-jellyfin_docker_network_mode_default: "{{ docker_networks_name_common }}"-jellyfin_docker_network_mode: "{{ lookup('vars', jellyfin_name + '_docker_network_mode_default', default=jellyfin_docker_network_mode_default) }}"+jellyfin_role_docker_hostname: "{{ jellyfin_name }}" # Networks-jellyfin_docker_networks_alias: "{{ jellyfin_name }}"-jellyfin_docker_networks_default: []-jellyfin_docker_networks_custom: []-jellyfin_docker_networks: "{{ docker_networks_common- + lookup('vars', jellyfin_name + '_docker_networks_default', default=jellyfin_docker_networks_default)- + lookup('vars', jellyfin_name + '_docker_networks_custom', default=jellyfin_docker_networks_custom) }}"--# Capabilities-jellyfin_docker_capabilities_default: []-jellyfin_docker_capabilities_custom: []-jellyfin_docker_capabilities: "{{ lookup('vars', jellyfin_name + '_docker_capabilities_default', default=jellyfin_docker_capabilities_default)- + lookup('vars', jellyfin_name + '_docker_capabilities_custom', default=jellyfin_docker_capabilities_custom) }}"--# Security Opts-jellyfin_docker_security_opts_default: []-jellyfin_docker_security_opts_custom: []-jellyfin_docker_security_opts: "{{ lookup('vars', jellyfin_name + '_docker_security_opts_default', default=jellyfin_docker_security_opts_default)- + lookup('vars', jellyfin_name + '_docker_security_opts_custom', default=jellyfin_docker_security_opts_custom) }}"+jellyfin_role_docker_networks_alias: "{{ jellyfin_name }}"+jellyfin_role_docker_networks_default: []+jellyfin_role_docker_networks_custom: []+jellyfin_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', 
'_docker_networks_default', role='jellyfin')+ + lookup('role_var', '_docker_networks_custom', role='jellyfin') }}" # Restart Policy-jellyfin_docker_restart_policy: unless-stopped+jellyfin_role_docker_restart_policy: unless-stopped # State-jellyfin_docker_state: started+jellyfin_role_docker_state: started
modified
roles/jellyfin/tasks/main2.yml
@@ -9,9 +9,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -21,17 +21,17 @@ - name: Docker Devices Task ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/set_docker_devices_variable.yml"- when: gpu.intel or use_nvidia+ when: use_intel or use_nvidia - name: Import preinstall task- ansible.builtin.import_tasks: "subtasks/preinstall.yml"+ ansible.builtin.include_tasks: "subtasks/preinstall.yml" when: (not continuous_integration) - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Import postinstall task- ansible.builtin.import_tasks: "subtasks/postinstall.yml"+ ansible.builtin.include_tasks: "subtasks/postinstall.yml" when: - (not continuous_integration) - (not jellyfin_existing_install.stat.exists)
modified
roles/jellyfin/tasks/subtasks/postinstall.yml
@@ -12,7 +12,7 @@ - name: Post-Install | Ensure transcodes folder has the correct permissions ansible.builtin.file:- path: "{{ jellyfin_paths_transcodes_location }}"+ path: "{{ lookup('role_var', '_paths_transcodes_location', role='jellyfin') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -20,12 +20,12 @@ - name: Post-Install | Ensure config folder has the correct permissions ansible.builtin.file:- path: "{{ jellyfin_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='jellyfin') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775" recurse: true - name: Post-Install | Settings Tasks- ansible.builtin.import_tasks: "settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" when: (not jellyfin_existing_install.stat.exists)
modified
roles/jellyfin/tasks/subtasks/preinstall.yml
@@ -11,26 +11,26 @@ # Migrate old non hotio setup ################################ -- name: Preinstall MIGRATE | Check if `{{ jellyfin_paths_xml_loct_old | basename }}` exists+- name: Preinstall | Check if `{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') | basename }}` exists ansible.builtin.stat:- path: "{{ jellyfin_paths_xml_loct_old }}"+ path: "{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') }}" register: jellyfin_xml_old -- name: Preinstall MIGRATE | New `{{ jellyfin_paths_xml_loct_old | basename }}` tasks+- name: Preinstall | New `{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') | basename }}` tasks when: jellyfin_xml_old.stat.exists block:- - name: Preinstall MIGRATE | Import `{{ jellyfin_paths_xml_loct_old | basename }}` if it exists+ - name: Preinstall | Import `{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') | basename }}` if it exists ansible.builtin.copy:- src: "{{ jellyfin_paths_xml_loct_old }}"- dest: "{{ jellyfin_paths_sys_xml_location }}"+ src: "{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') }}"+ dest: "{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" force: false - - name: Preinstall MIGRATE | Remove old `{{ jellyfin_paths_xml_loct_old | basename }}` if it exists"+ - name: Preinstall | Remove old `{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') | basename }}` if it exists" ansible.builtin.file:- path: "{{ jellyfin_paths_xml_loct_old }}"+ path: "{{ lookup('role_var', '_paths_xml_location_old', role='jellyfin') }}" state: absent # Low priority as migrate should not be needed.@@ -41,46 +41,46 @@ # Register | STATS 'n' FACTS ################################ -- name: Preinstall STAT | Check if `{{ jellyfin_paths_sys_xml_location | basename }}` exists+- name: Preinstall | Check if `{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') | basename }}` 
exists ansible.builtin.stat:- path: "{{ jellyfin_paths_sys_xml_location }}"+ path: "{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') }}" register: jellyfin_sys_xml -- name: Preinstall STAT | Check also if `{{ jellyfin_paths_sys_xml_location | basename }}` exists+- name: Preinstall | Check also if `{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') | basename }}` exists ansible.builtin.stat:- path: "{{ jellyfin_paths_sys_xml_location }}"+ path: "{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') }}" register: jellyfin_existing_install -- name: Preinstall STAT | Check if `{{ jellyfin_paths_net_xml_location | basename }}` exists+- name: Preinstall | Check if `{{ lookup('role_var', '_paths_net_xml_location', role='jellyfin') | basename }}` exists ansible.builtin.stat:- path: "{{ jellyfin_paths_net_xml_location }}"+ path: "{{ lookup('role_var', '_paths_net_xml_location', role='jellyfin') }}" register: network_xml -- name: Preinstall STAT | Check if `{{ jellyfin_paths_dlna_location | basename }}` exists+- name: Preinstall | Check if `{{ lookup('role_var', '_paths_dlna_location', role='jellyfin') | basename }}` exists ansible.builtin.stat:- path: "{{ jellyfin_paths_dlna_location }}"+ path: "{{ lookup('role_var', '_paths_dlna_location', role='jellyfin') }}" register: jellyfin_dlna ################################ # Block DLNA network spam ################################ -- name: Import 'dlna.xml' config+- name: Preinstall | Import 'dlna.xml' config ansible.builtin.copy: src: "dlna.xml"- dest: "{{ jellyfin_paths_dlna_location }}"+ dest: "{{ lookup('role_var', '_paths_dlna_location', role='jellyfin') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" force: false when: (not jellyfin_dlna.stat.exists) -- name: Settings | Update 'network.xml' file+- name: Preinstall | Update 'network.xml' file community.general.xml:- path: "{{ jellyfin_paths_net_xml_location }}"+ path: "{{ lookup('role_var', 
'_paths_net_xml_location', role='jellyfin') }}" xpath: "/NetworkConfiguration/{{ item.xpath }}" value: "{{ item.value }}" become: true become_user: "{{ user.name }}"- loop: "{{ jellyfin_network_settings_list }}"+ loop: "{{ lookup('role_var', '_network_settings_list', role='jellyfin') }}" when: network_xml.stat.exists
modified
roles/jellyfin/tasks/subtasks/settings.yml
@@ -6,43 +6,43 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Restart Docker container+- name: Settings | Restart Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/restart_docker_container.yml" -- name: Post-Install | Wait for 'system.xml' file to be created+- name: Settings | Wait for 'system.xml' file to be created ansible.builtin.wait_for:- path: "{{ jellyfin_paths_sys_xml_location }}"+ path: "{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') }}" state: present -- name: "Post-Install | Prompt user to complete the wizard"+- name: "Settings | Prompt user to complete the wizard" ansible.builtin.pause:- prompt: "Please open {{ jellyfin_web_url }}/web/index.html#!/wizardstart.html. Hit enter after having finished the wizard"+ prompt: "Please open {{ lookup('role_var', '_web_url', role='jellyfin') }}/web/index.html#!/wizardstart.html. Hit enter after having finished the wizard" -- name: Post-Install | Wait for 'network.xml' file to be created+- name: Settings | Wait for 'network.xml' file to be created ansible.builtin.wait_for:- path: "{{ jellyfin_paths_net_xml_location }}"+ path: "{{ lookup('role_var', '_paths_net_xml_location', role='jellyfin') }}" state: present -- name: Stop Docker container+- name: Settings | Stop Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml" - name: Settings | Update 'system.xml' file community.general.xml:- path: "{{ jellyfin_paths_sys_xml_location }}"+ path: "{{ lookup('role_var', '_paths_sys_xml_location', role='jellyfin') }}" xpath: "/ServerConfiguration/{{ item.xpath }}" value: "{{ item.value }}" become: true become_user: "{{ user.name }}"- loop: "{{ jellyfin_system_settings_list }}"+ loop: "{{ lookup('role_var', '_system_settings_list', role='jellyfin') }}" - name: Settings | Update 'network.xml' file community.general.xml:- path: "{{ 
jellyfin_paths_net_xml_location }}"+ path: "{{ lookup('role_var', '_paths_net_xml_location', role='jellyfin') }}" xpath: "/NetworkConfiguration/{{ item.xpath }}" value: "{{ item.value }}" become: true become_user: "{{ user.name }}"- loop: "{{ jellyfin_network_settings_list }}"+ loop: "{{ lookup('role_var', '_network_settings_list', role='jellyfin') }}" -- name: Start Docker container+- name: Settings | Start Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/kernel/tasks/main.yml
@@ -18,9 +18,9 @@ - name: Ubuntu 20.04 HWE Update Tasks ansible.builtin.include_tasks: "subtasks/03_update_focal.yml" when:- - (ansible_distribution == 'Ubuntu')- - (ansible_distribution_version is version('20.04', '=='))- - (ansible_kernel is version('5.8', '<'))+ - (ansible_facts['distribution'] == 'Ubuntu')+ - (ansible_facts['distribution_version'] is version('20.04', '=='))+ - (ansible_facts['kernel'] is version('5.8', '<')) - (not continuous_integration) - kernel_install_hwe @@ -28,9 +28,9 @@ - name: Ubuntu 22.04 Update Tasks ansible.builtin.include_tasks: "subtasks/04_update_jammy.yml" when:- - (ansible_distribution == 'Ubuntu')- - (ansible_distribution_version is version('22.04', '=='))- - (ansible_kernel is version('5.19', '<'))+ - (ansible_facts['distribution'] == 'Ubuntu')+ - (ansible_facts['distribution_version'] is version('22.04', '=='))+ - (ansible_facts['kernel'] is version('5.19', '<')) - (not continuous_integration) - kernel_install_hwe @@ -38,9 +38,9 @@ - name: Ubuntu 24.04 Update Tasks ansible.builtin.include_tasks: "subtasks/05_update_noble.yml" when:- - (ansible_distribution == 'Ubuntu')- - (ansible_distribution_version is version('24.04', '=='))- - (ansible_kernel is version('6.11', '<'))+ - (ansible_facts['distribution'] == 'Ubuntu')+ - (ansible_facts['distribution_version'] is version('24.04', '=='))+ - (ansible_facts['kernel'] is version('6.11', '<')) - (not continuous_integration) - kernel_install_hwe
modified
roles/kernel/tasks/subtasks/02_cron.yml
@@ -10,7 +10,7 @@ - name: Cron | Install cron ansible.builtin.apt: name: cron- state: present+ state: latest - name: Cron | Purge old kernels on reboot ansible.builtin.cron:
modified
roles/kernel/tasks/subtasks/03_update_focal.yml
@@ -10,6 +10,7 @@ - name: Update | Update linux kernel ansible.builtin.apt: name: "{{ kernel_update_apt_package }}"+ install_recommends: true update_cache: true state: latest register: r
modified
roles/kernel/tasks/subtasks/04_update_jammy.yml
@@ -10,6 +10,7 @@ - name: Update | Update linux kernel ansible.builtin.apt: name: "{{ kernel_update_apt_package_22 }}"+ install_recommends: true update_cache: true state: latest register: r@@ -19,8 +20,8 @@ name: - gcc-12 - g++-12- state: present- update_cache: yes+ state: latest+ update_cache: true - name: Update | Set gcc-12 and g++-12 as the default compilers ansible.builtin.shell: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 --slave /usr/bin/g++ g++ /usr/bin/g++-12
modified
roles/kernel/tasks/subtasks/05_update_noble.yml
@@ -10,6 +10,7 @@ - name: Update | Update linux kernel ansible.builtin.apt: name: "{{ kernel_update_apt_package_24 }}"+ install_recommends: true update_cache: true state: latest register: r
modified
roles/lidarr/defaults/main.yml
@@ -17,167 +17,121 @@ # Settings ################################ -lidarr_external_auth: true+lidarr_role_external_auth: true ################################ # Paths ################################ -lidarr_paths_folder: "{{ lidarr_name }}"-lidarr_paths_location: "{{ server_appdata_path }}/{{ lidarr_paths_folder }}"-lidarr_paths_folders_list:- - "{{ lidarr_paths_location }}"-lidarr_paths_config_location: "{{ lidarr_paths_location }}/config.xml"+lidarr_role_paths_folder: "{{ lidarr_name }}"+lidarr_role_paths_location: "{{ server_appdata_path }}/{{ lidarr_role_paths_folder }}"+lidarr_role_paths_folders_list:+ - "{{ lidarr_role_paths_location }}"+lidarr_role_paths_config_location: "{{ lidarr_role_paths_location }}/config.xml" ################################ # Web ################################ -lidarr_web_subdomain: "{{ lidarr_name }}"-lidarr_web_domain: "{{ user.domain }}"-lidarr_web_port: "8686"-lidarr_web_url: "{{ 'https://' + (lookup('vars', lidarr_name + '_web_subdomain', default=lidarr_web_subdomain) + '.' + lookup('vars', lidarr_name + '_web_domain', default=lidarr_web_domain)- if (lookup('vars', lidarr_name + '_web_subdomain', default=lidarr_web_subdomain) | length > 0)- else lookup('vars', lidarr_name + '_web_domain', default=lidarr_web_domain)) }}"+lidarr_role_web_subdomain: "{{ lidarr_name }}"+lidarr_role_web_domain: "{{ user.domain }}"+lidarr_role_web_port: "8686"+lidarr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='lidarr') + '.' 
+ lookup('role_var', '_web_domain', role='lidarr')+ if (lookup('role_var', '_web_subdomain', role='lidarr') | length > 0)+ else lookup('role_var', '_web_domain', role='lidarr')) }}" ################################ # DNS ################################ -lidarr_dns_record: "{{ lookup('vars', lidarr_name + '_web_subdomain', default=lidarr_web_subdomain) }}"-lidarr_dns_zone: "{{ lookup('vars', lidarr_name + '_web_domain', default=lidarr_web_domain) }}"-lidarr_dns_proxy: "{{ dns.proxied }}"+lidarr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='lidarr') }}"+lidarr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='lidarr') }}"+lidarr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -lidarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-lidarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', lidarr_name + '_name', default=lidarr_name)- if (lidarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-lidarr_traefik_middleware_custom: ""-lidarr_traefik_certresolver: "{{ traefik_default_certresolver }}"-lidarr_traefik_enabled: true-lidarr_traefik_api_enabled: true-lidarr_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)"+lidarr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+lidarr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + lidarr_name+ if (lookup('role_var', '_themepark_enabled', role='lidarr') and global_themepark_plugin_enabled)+ else '') }}"+lidarr_role_traefik_middleware_custom: ""+lidarr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+lidarr_role_traefik_enabled: true+lidarr_role_traefik_api_enabled: true+lidarr_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)" ################################-# API-################################--# default to 
blank-lidarr_api_key:--################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-lidarr_themepark_enabled: false-lidarr_themepark_app: "lidarr"-lidarr_themepark_theme: "{{ global_themepark_theme }}"-lidarr_themepark_domain: "{{ global_themepark_domain }}"-lidarr_themepark_addons: []+lidarr_role_themepark_enabled: false+lidarr_role_themepark_app: "lidarr"+lidarr_role_themepark_theme: "{{ global_themepark_theme }}"+lidarr_role_themepark_domain: "{{ global_themepark_domain }}"+lidarr_role_themepark_addons: [] ################################ # Docker ################################ # Container-lidarr_docker_container: "{{ lidarr_name }}"+lidarr_role_docker_container: "{{ lidarr_name }}" # Image-lidarr_docker_image_pull: true-lidarr_docker_image_repo: "ghcr.io/hotio/lidarr"-lidarr_docker_image_tag: "release"-lidarr_docker_image: "{{ lookup('vars', lidarr_name + '_docker_image_repo', default=lidarr_docker_image_repo)- + ':' + lookup('vars', lidarr_name + '_docker_image_tag', default=lidarr_docker_image_tag) }}"--# Ports-lidarr_docker_ports_defaults: []-lidarr_docker_ports_custom: []-lidarr_docker_ports: "{{ lookup('vars', lidarr_name + '_docker_ports_defaults', default=lidarr_docker_ports_defaults)- + lookup('vars', lidarr_name + '_docker_ports_custom', default=lidarr_docker_ports_custom) }}"+lidarr_role_docker_image_pull: true+lidarr_role_docker_image_repo: "ghcr.io/hotio/lidarr"+lidarr_role_docker_image_tag: "release"+lidarr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='lidarr') }}:{{ lookup('role_var', '_docker_image_tag', role='lidarr') }}" # Envs-lidarr_docker_envs_default:+lidarr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-lidarr_docker_envs_custom: {}-lidarr_docker_envs: "{{ lookup('vars', lidarr_name + '_docker_envs_default', default=lidarr_docker_envs_default)- | combine(lookup('vars', lidarr_name + 
'_docker_envs_custom', default=lidarr_docker_envs_custom)) }}"--# Commands-lidarr_docker_commands_default: []-lidarr_docker_commands_custom: []-lidarr_docker_commands: "{{ lookup('vars', lidarr_name + '_docker_commands_default', default=lidarr_docker_commands_default)- + lookup('vars', lidarr_name + '_docker_commands_custom', default=lidarr_docker_commands_custom) }}"+lidarr_role_docker_envs_custom: {}+lidarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='lidarr')+ | combine(lookup('role_var', '_docker_envs_custom', role='lidarr')) }}" # Volumes-lidarr_docker_volumes_default:- - "{{ lidarr_paths_location }}:/config"+lidarr_role_docker_volumes_default:+ - "{{ lidarr_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"+lidarr_role_docker_volumes_legacy: - "/mnt/unionfs/Media/Music:/music"-lidarr_docker_volumes_custom: []-lidarr_docker_volumes: "{{ lookup('vars', lidarr_name + '_docker_volumes_default', default=lidarr_docker_volumes_default)- + lookup('vars', lidarr_name + '_docker_volumes_custom', default=lidarr_docker_volumes_custom) }}"--# Devices-lidarr_docker_devices_default: []-lidarr_docker_devices_custom: []-lidarr_docker_devices: "{{ lookup('vars', lidarr_name + '_docker_devices_default', default=lidarr_docker_devices_default)- + lookup('vars', lidarr_name + '_docker_devices_custom', default=lidarr_docker_devices_custom) }}"--# Hosts-lidarr_docker_hosts_default: {}-lidarr_docker_hosts_custom: {}-lidarr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', lidarr_name + '_docker_hosts_default', default=lidarr_docker_hosts_default))- | combine(lookup('vars', lidarr_name + '_docker_hosts_custom', default=lidarr_docker_hosts_custom)) }}"+lidarr_role_docker_volumes_custom: []+lidarr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='lidarr')+ + lookup('role_var', '_docker_volumes_custom', role='lidarr')+ + (lookup('role_var', '_docker_volumes_legacy', role='lidarr')+ if 
docker_legacy_volume+ else []) }}" # Labels-lidarr_docker_labels_default: {}-lidarr_docker_labels_custom: {}-lidarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', lidarr_name + '_docker_labels_default', default=lidarr_docker_labels_default))- | combine((traefik_themepark_labels- if (lidarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', lidarr_name + '_docker_labels_custom', default=lidarr_docker_labels_custom)) }}"+lidarr_role_docker_labels_default: {}+lidarr_role_docker_labels_custom: {}+lidarr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='lidarr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='lidarr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='lidarr')) }}" # Hostname-lidarr_docker_hostname: "{{ lidarr_name }}"--# Network Mode-lidarr_docker_network_mode_default: "{{ docker_networks_name_common }}"-lidarr_docker_network_mode: "{{ lookup('vars', lidarr_name + '_docker_network_mode_default', default=lidarr_docker_network_mode_default) }}"+lidarr_role_docker_hostname: "{{ lidarr_name }}" # Networks-lidarr_docker_networks_alias: "{{ lidarr_name }}"-lidarr_docker_networks_default: []-lidarr_docker_networks_custom: []-lidarr_docker_networks: "{{ docker_networks_common- + lookup('vars', lidarr_name + '_docker_networks_default', default=lidarr_docker_networks_default)- + lookup('vars', lidarr_name + '_docker_networks_custom', default=lidarr_docker_networks_custom) }}"--# Capabilities-lidarr_docker_capabilities_default: []-lidarr_docker_capabilities_custom: []-lidarr_docker_capabilities: "{{ lookup('vars', lidarr_name + '_docker_capabilities_default', default=lidarr_docker_capabilities_default)- + lookup('vars', lidarr_name + '_docker_capabilities_custom', default=lidarr_docker_capabilities_custom) }}"--# Security Opts-lidarr_docker_security_opts_default: []-lidarr_docker_security_opts_custom: 
[]-lidarr_docker_security_opts: "{{ lookup('vars', lidarr_name + '_docker_security_opts_default', default=lidarr_docker_security_opts_default)- + lookup('vars', lidarr_name + '_docker_security_opts_custom', default=lidarr_docker_security_opts_custom) }}"+lidarr_role_docker_networks_alias: "{{ lidarr_name }}"+lidarr_role_docker_networks_default: []+lidarr_role_docker_networks_custom: []+lidarr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='lidarr')+ + lookup('role_var', '_docker_networks_custom', role='lidarr') }}" # Restart Policy-lidarr_docker_restart_policy: unless-stopped+lidarr_role_docker_restart_policy: unless-stopped # State-lidarr_docker_state: started+lidarr_role_docker_state: started
modified
roles/lidarr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -24,5 +24,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (lookup('vars', lidarr_name + '_traefik_sso_middleware', default=lidarr_traefik_sso_middleware) | length > 0) and lookup('vars', lidarr_name + '_external_auth', default=lidarr_external_auth)+ ansible.builtin.include_tasks: "subtasks/auth.yml"+ when: (lookup('role_var', '_traefik_sso_middleware', role='lidarr') | length > 0) and lookup('role_var', '_external_auth', role='lidarr')
modified
roles/lidarr/tasks/subtasks/auth.yml
@@ -9,7 +9,7 @@ --- - name: Auth | Wait for 'config.xml' to be created ansible.builtin.wait_for:- path: "/opt/{{ lidarr_name }}/config.xml"+ path: "{{ lookup('role_var', '_paths_config_location', role='lidarr') }}" state: present - name: Auth | Wait for 10 seconds@@ -18,7 +18,7 @@ - name: Auth | Lookup AuthenticationMethod value community.general.xml:- path: "/opt/{{ lidarr_name }}/config.xml"+ path: "{{ lookup('role_var', '_paths_config_location', role='lidarr') }}" xpath: "/Config/AuthenticationMethod" content: "text" register: xmlresp@@ -28,7 +28,7 @@ block: - name: Auth | Change the 'AuthenticationMethod' attribute to 'External' community.general.xml:- path: "/opt/{{ lidarr_name }}/config.xml"+ path: "{{ lookup('role_var', '_paths_config_location', role='lidarr') }}" xpath: "/Config/AuthenticationMethod" value: "External"
modified
roles/lldap/defaults/main.yml
@@ -18,150 +18,113 @@ ################################ # Toggles if the configuration template uses SMTP or not.-lldap_smtp_enabled: false+lldap_role_smtp_enabled: false # The SMTP server.-lldap_smtp_server: "smtp.gmail.com"+lldap_role_smtp_server: "smtp.gmail.com" # The SMTP port.-lldap_smtp_port: "587"+lldap_role_smtp_port: "587" # How the connection is encrypted, either "NONE" (no encryption), "TLS" or "STARTTLS".-lldap_smtp_encryption: "TLS"+lldap_role_smtp_encryption: "TLS" # The SMTP user, usually your email address.-lldap_smtp_user: "sender@gmail.com"+lldap_role_smtp_user: "sender@gmail.com" # The SMTP password.-lldap_smtp_password: "password"+lldap_role_smtp_password: "password" # is a free-form name, followed by an email between <>.-lldap_smtp_from: "LLDAP Admin <sender@gmail.com>"+lldap_role_smtp_from: "LLDAP Admin <sender@gmail.com>" # The header field, optional: how the sender appears in the email. # The first is a free-form name, followed by an email between <>.-lldap_smtp_reply_to: "Do not reply <noreply@localhost>"+lldap_role_smtp_reply_to: "Do not reply <noreply@localhost>" ################################ # Paths ################################ -lldap_paths_folder: "{{ lldap_name }}"-lldap_paths_location: "{{ server_appdata_path }}/{{ lldap_paths_folder }}"-lldap_paths_folders_list:- - "{{ lldap_paths_location }}"+lldap_role_paths_folder: "{{ lldap_name }}"+lldap_role_paths_location: "{{ server_appdata_path }}/{{ lldap_role_paths_folder }}"+lldap_role_paths_folders_list:+ - "{{ lldap_role_paths_location }}" ################################ # Web ################################ -lldap_web_subdomain: "lldap"-lldap_web_domain: "{{ user.domain }}"-lldap_web_port: "17170"-lldap_web_url: "{{ 'https://' + (lldap_web_subdomain + '.' 
+ lldap_web_domain- if (lldap_web_subdomain | length > 0)- else lldap_web_domain) }}"+lldap_role_web_subdomain: "lldap"+lldap_role_web_domain: "{{ user.domain }}"+lldap_role_web_port: "17170"+lldap_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='lldap') + '.' + lookup('role_var', '_web_domain', role='lldap')+ if (lookup('role_var', '_web_subdomain', role='lldap') | length > 0)+ else lookup('role_var', '_web_domain', role='lldap')) }}" ################################ # DNS ################################ -lldap_dns_record: "{{ lldap_web_subdomain }}"-lldap_dns_zone: "{{ lldap_web_domain }}"-lldap_dns_proxy: "{{ dns.proxied }}"+lldap_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='lldap') }}"+lldap_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='lldap') }}"+lldap_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -lldap_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-lldap_traefik_middleware_default: "{{ traefik_default_middleware }}"-lldap_traefik_middleware_custom: ""-lldap_traefik_certresolver: "{{ traefik_default_certresolver }}"-lldap_traefik_enabled: true-lldap_traefik_api_enabled: false-lldap_traefik_api_endpoint: ""+lldap_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+lldap_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+lldap_role_traefik_middleware_custom: ""+lldap_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+lldap_role_traefik_enabled: true+lldap_role_traefik_api_enabled: false+lldap_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-lldap_docker_container: "{{ lldap_name }}"+lldap_role_docker_container: "{{ lldap_name }}" # Image-lldap_docker_image_pull: true-lldap_docker_image_repo: "nitnelave/lldap"-lldap_docker_image_tag: "stable"-lldap_docker_image: "{{ lldap_docker_image_repo- + ':' + 
lldap_docker_image_tag }}"--# Ports-lldap_docker_ports_defaults: []-lldap_docker_ports_custom: []-lldap_docker_ports: "{{ lldap_docker_ports_defaults- + lldap_docker_ports_custom }}"+lldap_role_docker_image_pull: true+lldap_role_docker_image_repo: "nitnelave/lldap"+lldap_role_docker_image_tag: "stable"+lldap_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='lldap') }}:{{ lookup('role_var', '_docker_image_tag', role='lldap') }}" # Envs-lldap_docker_envs_default:+lldap_role_docker_envs_default: TZ: "{{ tz }}" UID: "{{ uid }}" GID: "{{ gid }}"-lldap_docker_envs_custom: {}-lldap_docker_envs: "{{ lldap_docker_envs_default- | combine(lldap_docker_envs_custom) }}"--# Commands-lldap_docker_commands_default: []-lldap_docker_commands_custom: []-lldap_docker_commands: "{{ lldap_docker_commands_default- + lldap_docker_commands_custom }}"+lldap_role_docker_envs_custom: {}+lldap_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='lldap')+ | combine(lookup('role_var', '_docker_envs_custom', role='lldap')) }}" # Volumes-lldap_docker_volumes_default:- - "{{ lldap_paths_location }}:/data"-lldap_docker_volumes_custom: []-lldap_docker_volumes: "{{ lldap_docker_volumes_default- + lldap_docker_volumes_custom }}"--# Devices-lldap_docker_devices_default: []-lldap_docker_devices_custom: []-lldap_docker_devices: "{{ lldap_docker_devices_default- + lldap_docker_devices_custom }}"--# Hosts-lldap_docker_hosts_default: {}-lldap_docker_hosts_custom: {}-lldap_docker_hosts: "{{ docker_hosts_common- | combine(lldap_docker_hosts_default)- | combine(lldap_docker_hosts_custom) }}"--# Labels-lldap_docker_labels_default: {}-lldap_docker_labels_custom: {}-lldap_docker_labels: "{{ docker_labels_common- | combine(lldap_docker_labels_default)- | combine(lldap_docker_labels_custom) }}"+lldap_role_docker_volumes_default:+ - "{{ lldap_role_paths_location }}:/data"+lldap_role_docker_volumes_custom: []+lldap_role_docker_volumes: "{{ lookup('role_var', 
'_docker_volumes_default', role='lldap')+ + lookup('role_var', '_docker_volumes_custom', role='lldap') }}" # Hostname-lldap_docker_hostname: "{{ lldap_name }}"+lldap_role_docker_hostname: "{{ lldap_name }}" # Networks-lldap_docker_networks_alias: "{{ lldap_name }}"-lldap_docker_networks_default: []-lldap_docker_networks_custom: []-lldap_docker_networks: "{{ docker_networks_common- + lldap_docker_networks_default- + lldap_docker_networks_custom }}"--# Capabilities-lldap_docker_capabilities_default: []-lldap_docker_capabilities_custom: []-lldap_docker_capabilities: "{{ lldap_docker_capabilities_default- + lldap_docker_capabilities_custom }}"--# Security Opts-lldap_docker_security_opts_default: []-lldap_docker_security_opts_custom: []-lldap_docker_security_opts: "{{ lldap_docker_security_opts_default- + lldap_docker_security_opts_custom }}"+lldap_role_docker_networks_alias: "{{ lldap_name }}"+lldap_role_docker_networks_default: []+lldap_role_docker_networks_custom: []+lldap_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='lldap')+ + lookup('role_var', '_docker_networks_custom', role='lldap') }}" # Restart Policy-lldap_docker_restart_policy: unless-stopped+lldap_role_docker_restart_policy: unless-stopped # State-lldap_docker_state: started+lldap_role_docker_state: started++lldap_role_docker_healthcheck:+ test: ["CMD", "/app/lldap", "healthcheck", "--config-file", "/data/lldap_config.toml"]+ interval: 30s+ timeout: 30s+ retries: 3+ start_period: 30s+ start_interval: 1s
modified
roles/lldap/tasks/main.yml
@@ -10,16 +10,16 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" - name: Reset lldap directory ansible.builtin.file:- path: "{{ lldap_paths_location }}"+ path: "{{ lldap_role_paths_location }}" state: absent when: ('lldap-reset' in ansible_run_tags) @@ -28,13 +28,13 @@ - name: Check if 'lldap_config.toml' exists ansible.builtin.stat:- path: "{{ lldap_paths_location }}/lldap_config.toml"+ path: "{{ lldap_role_paths_location }}/lldap_config.toml" register: lldap_config_stat - name: Import default 'lldap_config.toml' ansible.builtin.template: src: lldap_config.toml.j2- dest: "{{ lldap_paths_location }}/lldap_config.toml"+ dest: "{{ lldap_role_paths_location }}/lldap_config.toml" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/lldap/templates/lldap_config.toml.j2
@@ -27,7 +27,7 @@ http_port = 17170 ## The public URL of the server, for password reset links.-http_url = "{{ lldap_web_url }}"+http_url = "{{ lookup('role_var', '_web_url', role='lldap') }}" ## Random secret for JWT signature. ## This secret should be random, and should be shared with application@@ -116,15 +116,15 @@ ## To set these options from environment variables, use the following format ## (example with "password"): LLDAP_SMTP_OPTIONS__PASSWORD [smtp_options]-{% if lldap_smtp_enabled %}+{% if lookup('role_var', '_smtp_enabled', role='lldap') %} enable_password_reset=true-server="{{ lldap_smtp_server }}"-port={{ lldap_smtp_port }}-smtp_encryption="{{ lldap_smtp_encryption }}"-user="{{ lldap_smtp_user }}"-password="{{ lldap_smtp_password }}"-from="{{ lldap_smtp_from }}"-reply_to="{{ lldap_smtp_reply_to }}"+server="{{ lookup('role_var', '_smtp_server', role='lldap') }}"+port={{ lookup('role_var', '_smtp_port', role='lldap') }}+smtp_encryption="{{ lookup('role_var', '_smtp_encryption', role='lldap') }}"+user="{{ lookup('role_var', '_smtp_user', role='lldap') }}"+password="{{ lookup('role_var', '_smtp_password', role='lldap') }}"+from="{{ lookup('role_var', '_smtp_from', role='lldap') }}"+reply_to="{{ lookup('role_var', '_smtp_reply_to', role='lldap') }}" {% else %} ## Whether to enabled password reset via email, from LLDAP. #enable_password_reset=true
modified
roles/mainline/tasks/main.yml
@@ -14,7 +14,7 @@ - name: Identify apt source files ansible.builtin.find: paths: /etc/apt/sources.list.d/- recurse: no+ recurse: false register: apt_source_files - name: Check if file contains 'cappelikan'@@ -32,7 +32,7 @@ - name: Update APT package index ansible.builtin.apt:- update_cache: yes+ update_cache: true - name: Ensure '/etc/apt/keyrings' exists ansible.builtin.file:@@ -56,7 +56,7 @@ - name: Add cappelikan/ppa ansible.builtin.apt_repository:- repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/cappelikan.asc] https://ppa.launchpadcontent.net/cappelikan/ppa/ubuntu {{ ansible_distribution_release }} main"+ repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/cappelikan.asc] https://ppa.launchpadcontent.net/cappelikan/ppa/ubuntu {{ ansible_facts['distribution_release'] }} main" state: present update_cache: true register: result
modified
roles/mariadb/defaults/main.yml
@@ -17,125 +17,76 @@ # Settings ################################ -mariadb_docker_env_password: "password321"-mariadb_docker_env_user: "{{ user.name }}"-mariadb_docker_env_db: "saltbox"+mariadb_role_docker_env_password: "password321"+mariadb_role_docker_env_user: "{{ user.name }}"+mariadb_role_docker_env_db: "saltbox" ################################ # Paths ################################ -mariadb_paths_folder: "{{ mariadb_name }}"-mariadb_paths_location: "{{ server_appdata_path }}/{{ mariadb_paths_folder }}"-mariadb_paths_folders_list:- - "{{ mariadb_paths_location }}"+mariadb_role_paths_folder: "{{ mariadb_name }}"+mariadb_role_paths_location: "{{ server_appdata_path }}/{{ mariadb_role_paths_folder }}"+mariadb_role_paths_folders_list:+ - "{{ mariadb_role_paths_location }}" ################################ # Migration Settings ################################ -mariadb_docker_envs_mysql_root_password: password321-mariadb_docker_image_migration: "lscr.io/linuxserver/mariadb:10.6.13"-mariadb_docker_volumes_migration:- - "{{ mariadb_paths_location }}:/config"+mariadb_role_docker_envs_mysql_root_password: password321+mariadb_role_docker_image_migration: "lscr.io/linuxserver/mariadb:10.6.13"+mariadb_role_docker_volumes_migration:+ - "{{ mariadb_role_paths_location }}:/config" ################################ # Docker ################################ # Container-mariadb_docker_container: "{{ mariadb_name }}"+mariadb_role_docker_container: "{{ mariadb_name }}" # Image-mariadb_docker_image_pull: true-mariadb_docker_image_repo: "mariadb"-mariadb_docker_image_tag: "10"-mariadb_docker_image: "{{ lookup('vars', mariadb_name + '_docker_image_repo', default=mariadb_docker_image_repo)- + ':' + lookup('vars', mariadb_name + '_docker_image_tag', default=mariadb_docker_image_tag) }}"--# Ports-mariadb_docker_ports_defaults: []-mariadb_docker_ports_custom: []-mariadb_docker_ports: "{{ lookup('vars', mariadb_name + '_docker_ports_default', default=mariadb_docker_ports_defaults)- + 
lookup('vars', mariadb_name + '_docker_ports_custom', default=mariadb_docker_ports_custom) }}"+mariadb_role_docker_image_pull: true+mariadb_role_docker_image_repo: "mariadb"+mariadb_role_docker_image_tag: "10"+mariadb_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='mariadb') }}:{{ lookup('role_var', '_docker_image_tag', role='mariadb') }}" # Envs-mariadb_docker_envs_default:+mariadb_role_docker_envs_default: TZ: "{{ tz }}"- MARIADB_ROOT_PASSWORD: "{{ lookup('vars', mariadb_name + '_docker_env_password', default=mariadb_docker_env_password) }}"- MARIADB_USER: "{{ lookup('vars', mariadb_name + '_docker_env_user', default=mariadb_docker_env_user) }}"- MARIADB_PASSWORD: "{{ lookup('vars', mariadb_name + '_docker_env_password', default=mariadb_docker_env_password) }}"- MARIADB_DATABASE: "{{ lookup('vars', mariadb_name + '_docker_env_db', default=mariadb_docker_env_db) }}"+ MARIADB_ROOT_PASSWORD: "{{ lookup('role_var', '_docker_env_password', role='mariadb') }}"+ MARIADB_USER: "{{ lookup('role_var', '_docker_env_user', role='mariadb') }}"+ MARIADB_PASSWORD: "{{ lookup('role_var', '_docker_env_password', role='mariadb') }}"+ MARIADB_DATABASE: "{{ lookup('role_var', '_docker_env_db', role='mariadb') }}" MARIADB_AUTO_UPGRADE: "1"-mariadb_docker_envs_custom: {}-mariadb_docker_envs: "{{ lookup('vars', mariadb_name + '_docker_envs_default', default=mariadb_docker_envs_default)- | combine(lookup('vars', mariadb_name + '_docker_envs_custom', default=mariadb_docker_envs_custom)) }}"--# Commands-mariadb_docker_commands_default: []-mariadb_docker_commands_custom: []-mariadb_docker_commands: "{{ lookup('vars', mariadb_name + '_docker_commands_default', default=mariadb_docker_commands_default)- + lookup('vars', mariadb_name + '_docker_commands_custom', default=mariadb_docker_commands_custom) }}"+mariadb_role_docker_envs_custom: {}+mariadb_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='mariadb')+ | combine(lookup('role_var', 
'_docker_envs_custom', role='mariadb')) }}" # Volumes-mariadb_docker_volumes_default:- - "{{ mariadb_paths_location }}:/var/lib/mysql"-mariadb_docker_volumes_custom: []-mariadb_docker_volumes: "{{ lookup('vars', mariadb_name + '_docker_volumes_default', default=mariadb_docker_volumes_default)- + lookup('vars', mariadb_name + '_docker_volumes_custom', default=mariadb_docker_volumes_custom) }}"--# Devices-mariadb_docker_devices_default: []-mariadb_docker_devices_custom: []-mariadb_docker_devices: "{{ lookup('vars', mariadb_name + '_docker_devices_default', default=mariadb_docker_devices_default)- + lookup('vars', mariadb_name + '_docker_devices_custom', default=mariadb_docker_devices_custom) }}"--# Hosts-mariadb_docker_hosts_default: {}-mariadb_docker_hosts_custom: {}-mariadb_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', mariadb_name + '_docker_hosts_default', default=mariadb_docker_hosts_default))- | combine(lookup('vars', mariadb_name + '_docker_hosts_custom', default=mariadb_docker_hosts_custom)) }}"--# Labels-mariadb_docker_labels_default: {}-mariadb_docker_labels_custom: {}-mariadb_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', mariadb_name + '_docker_labels_default', default=mariadb_docker_labels_default))- | combine(lookup('vars', mariadb_name + '_docker_labels_custom', default=mariadb_docker_labels_custom)) }}"+mariadb_role_docker_volumes_default:+ - "{{ mariadb_role_paths_location }}:/var/lib/mysql"+mariadb_role_docker_volumes_custom: []+mariadb_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='mariadb')+ + lookup('role_var', '_docker_volumes_custom', role='mariadb') }}" # Hostname-mariadb_docker_hostname: "{{ mariadb_name }}"--# Network Mode-mariadb_docker_network_mode_default: "{{ docker_networks_name_common }}"-mariadb_docker_network_mode: "{{ lookup('vars', mariadb_name + '_docker_network_mode_default', default=mariadb_docker_network_mode_default) }}"+mariadb_role_docker_hostname: "{{ 
mariadb_name }}" # Networks-mariadb_docker_networks_alias: "{{ mariadb_name }}"-mariadb_docker_networks_default: []-mariadb_docker_networks_custom: []-mariadb_docker_networks: "{{ docker_networks_common- + lookup('vars', mariadb_name + '_docker_networks_default', default=mariadb_docker_networks_default)- + lookup('vars', mariadb_name + '_docker_networks_dcustom', default=mariadb_docker_networks_custom) }}"--# Capabilities-mariadb_docker_capabilities_default: []-mariadb_docker_capabilities_custom: []-mariadb_docker_capabilities: "{{ lookup('vars', mariadb_name + '_docker_capabilities_default', default=mariadb_docker_capabilities_default)- + lookup('vars', mariadb_name + '_docker_capabilities_custom', default=mariadb_docker_capabilities_custom) }}"--# Security Opts-mariadb_docker_security_opts_default: []-mariadb_docker_security_opts_custom: []-mariadb_docker_security_opts: "{{ lookup('vars', mariadb_name + '_docker_security_opts_default', default=mariadb_docker_security_opts_default)- + lookup('vars', mariadb_name + '_docker_security_opts_custom', default=mariadb_docker_security_opts_custom) }}"+mariadb_role_docker_networks_alias: "{{ mariadb_name }}"+mariadb_role_docker_networks_default: []+mariadb_role_docker_networks_custom: []+mariadb_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='mariadb')+ + lookup('role_var', '_docker_networks_custom', role='mariadb') }}" # Restart Policy-mariadb_docker_restart_policy: unless-stopped+mariadb_role_docker_restart_policy: unless-stopped # State-mariadb_docker_state: started+mariadb_role_docker_state: started # User-mariadb_docker_user: "{{ uid }}:{{ gid }}"+mariadb_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/mariadb/tasks/post-migration.yml
@@ -9,7 +9,7 @@ --- - name: Check for 'dump.sql' ansible.builtin.stat:- path: "/opt/mariadb_legacy/dump.sql"+ path: "{{ server_appdata_path }}/mariadb_legacy/dump.sql" register: dump - name: Import databases@@ -21,7 +21,7 @@ - name: Restore data from dump file ansible.builtin.shell: |- docker exec -i mariadb sh -c 'exec mariadb -uroot -p"$MARIADB_ROOT_PASSWORD"' < /opt/mariadb_legacy/dump.sql+ docker exec -i mariadb sh -c 'exec mariadb -uroot -p"$MARIADB_ROOT_PASSWORD"' < {{ server_appdata_path }}/mariadb_legacy/dump.sql - name: Force MariaDB upgrade ansible.builtin.shell: |
modified
roles/mariadb/tasks/pre-migration.yml
@@ -7,9 +7,9 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Check for '/opt/mariadb/databases'+- name: Check for '{{ server_appdata_path }}/mariadb/databases' ansible.builtin.stat:- path: "/opt/mariadb/databases"+ path: "{{ server_appdata_path }}/mariadb/databases" register: subfolder - name: Dump and backup databases@@ -24,9 +24,9 @@ PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"- MYSQL_ROOT_PASSWORD: "{{ mariadb_docker_envs_mysql_root_password }}"+ MYSQL_ROOT_PASSWORD: "{{ lookup('role_var', '_docker_envs_mysql_root_password', role='mariadb') }}" hostname: "mariadb"- image: "{{ mariadb_docker_image_migration }}"+ image: "{{ lookup('role_var', '_docker_image_migration', role='mariadb') }}" log_driver: "{{ (docker_log_driver != 'default') | ternary(docker_log_driver, lookup('vars', role_name + '_docker_log_driver', default=omit)) }}" log_options: "{{ (docker_log_options != 'default') | ternary(docker_log_options, lookup('vars', role_name + '_docker_log_options', default=omit)) }}" name: "mariadb"@@ -37,7 +37,7 @@ restart_policy: "{{ lookup('vars', role_name + '_docker_restart_policy', default='unless-stopped') }}" state: started stop_timeout: "{{ lookup('vars', role_name + '_docker_stop_timeout', default='10') }}"- volumes: "{{ mariadb_docker_volumes_migration }}"+ volumes: "{{ lookup('role_var', '_docker_volumes_migration', role='mariadb') }}" - name: Wait for 30 seconds ansible.builtin.wait_for:@@ -45,7 +45,7 @@ - name: Creating database dump ansible.builtin.shell: |- docker exec mariadb sh -c 'exec mariadb-dump --all-databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > /opt/mariadb/dump.sql+ docker exec mariadb sh -c 'exec mariadb-dump --all-databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > {{ server_appdata_path }}/mariadb/dump.sql - name: Wait for 5 seconds ansible.builtin.wait_for:@@ -56,16 +56,16 @@ vars: var_prefix: "mariadb" - - name: Relocate '/opt/mariadb' to '/opt/mariadb_legacy'- 
ansible.builtin.shell: "mv /opt/mariadb /opt/mariadb_legacy"+ - name: Relocate '{{ server_appdata_path }}/mariadb' to '{{ server_appdata_path }}/mariadb_legacy'+ ansible.builtin.shell: "mv {{ server_appdata_path }}/mariadb {{ server_appdata_path }}/mariadb_legacy" register: relocate - name: Wait for 5 seconds ansible.builtin.wait_for: timeout: 5 - - name: Remove '/opt/mariadb/'+ - name: Remove '{{ server_appdata_path }}/mariadb/' ansible.builtin.file:- path: /opt/mariadb/+ path: "{{ server_appdata_path }}/mariadb/" state: absent when: relocate.rc == 0
modified
roles/mongodb/defaults/main.yml
@@ -17,109 +17,58 @@ # Paths ################################ -mongodb_paths_folder: "{{ mongodb_name }}"-mongodb_paths_location: "{{ server_appdata_path }}/{{ mongodb_paths_folder }}"-mongodb_paths_folders_list:- - "{{ mongodb_paths_location }}"- - "{{ mongodb_paths_location }}/config"+mongodb_role_paths_folder: "{{ mongodb_name }}"+mongodb_role_paths_location: "{{ server_appdata_path }}/{{ mongodb_role_paths_folder }}"+mongodb_role_paths_folders_list:+ - "{{ mongodb_role_paths_location }}"+ - "{{ mongodb_role_paths_location }}/config" ################################ # Docker ################################ # Container-mongodb_docker_container: "{{ mongodb_name }}"+mongodb_role_docker_container: "{{ mongodb_name }}" # Image-mongodb_docker_image_pull: true-mongodb_docker_image_tag: "6"-mongodb_docker_image_repo: "mongo"-mongodb_docker_image: "{{ lookup('vars', mongodb_name + '_docker_image_repo', default=mongodb_docker_image_repo)- + ':' + lookup('vars', mongodb_name + '_docker_image_tag', default=mongodb_docker_image_tag) }}"--# Ports-mongodb_docker_ports_defaults: []-mongodb_docker_ports_custom: []-mongodb_docker_ports: "{{ lookup('vars', mongodb_name + '_docker_ports_default', default=mongodb_docker_ports_defaults)- + lookup('vars', mongodb_name + '_docker_ports_custom', default=mongodb_docker_ports_custom) }}"+mongodb_role_docker_image_pull: true+mongodb_role_docker_image_repo: "mongo"+mongodb_role_docker_image_tag: "6"+mongodb_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='mongodb') }}:{{ lookup('role_var', '_docker_image_tag', role='mongodb') }}" # Envs-mongodb_docker_envs_default:+mongodb_role_docker_envs_default: MONGO_DATA_DIR: "/data/db" MONGO_LOG_DIR: "/dev/null" MONGO_URL: "mongodb://{{ mongodb_name }}:27017/"-mongodb_docker_envs_custom: {}-mongodb_docker_envs: "{{ lookup('vars', mongodb_name + '_docker_envs_default', default=mongodb_docker_envs_default)- | combine(lookup('vars', mongodb_name + '_docker_envs_custom', 
default=mongodb_docker_envs_custom)) }}"--# Commands-mongodb_docker_commands_default: []-mongodb_docker_commands_custom: []-mongodb_docker_commands: "{{ lookup('vars', mongodb_name + '_docker_commands_default', default=mongodb_docker_commands_default)- + lookup('vars', mongodb_name + '_docker_commands_custom', default=mongodb_docker_commands_custom) }}"+mongodb_role_docker_envs_custom: {}+mongodb_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='mongodb')+ | combine(lookup('role_var', '_docker_envs_custom', role='mongodb')) }}" # Volumes-mongodb_docker_volumes_default:- - "{{ mongodb_paths_location }}:/data/db:rw"- - "{{ mongodb_paths_location }}/config:/data/configdb"-mongodb_docker_volumes_custom: []-mongodb_docker_volumes: "{{ lookup('vars', mongodb_name + '_docker_volumes_default', default=mongodb_docker_volumes_default)- + lookup('vars', mongodb_name + '_docker_volumes_custom', default=mongodb_docker_volumes_custom) }}"---# Devices-mongodb_docker_devices_default: []-mongodb_docker_devices_custom: []-mongodb_docker_devices: "{{ lookup('vars', mongodb_name + '_docker_devices_default', default=mongodb_docker_devices_default)- + lookup('vars', mongodb_name + '_docker_devices_custom', default=mongodb_docker_devices_custom) }}"--# Hosts-mongodb_docker_hosts_default: {}-mongodb_docker_hosts_custom: {}-mongodb_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', mongodb_name + '_docker_hosts_default', default=mongodb_docker_hosts_default))- | combine(lookup('vars', mongodb_name + '_docker_hosts_custom', default=mongodb_docker_hosts_custom)) }}"--# Labels-mongodb_docker_labels_default: {}-mongodb_docker_labels_custom: {}-mongodb_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', mongodb_name + '_docker_labels_default', default=mongodb_docker_labels_default))- | combine(lookup('vars', mongodb_name + '_docker_labels_custom', default=mongodb_docker_labels_custom)) }}"+mongodb_role_docker_volumes_default:+ - "{{ 
mongodb_role_paths_location }}:/data/db:rw"+ - "{{ mongodb_role_paths_location }}/config:/data/configdb"+mongodb_role_docker_volumes_custom: []+mongodb_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='mongodb')+ + lookup('role_var', '_docker_volumes_custom', role='mongodb') }}" # Hostname-mongodb_docker_hostname: "{{ mongodb_name }}"--# Network Mode-mongodb_docker_network_mode_default: "{{ docker_networks_name_common }}"-mongodb_docker_network_mode: "{{ lookup('vars', mongodb_name + '_docker_network_mode_default', default=mongodb_docker_network_mode_default) }}"+mongodb_role_docker_hostname: "{{ mongodb_name }}" # Networks-mongodb_docker_networks_alias: "{{ mongodb_name }}"-mongodb_docker_networks_default: []-mongodb_docker_networks_custom: []-mongodb_docker_networks: "{{ docker_networks_common- + lookup('vars', mongodb_name + '_docker_networks_default', default=mongodb_docker_networks_default)- + lookup('vars', mongodb_name + '_docker_networks_dcustom', default=mongodb_docker_networks_custom) }}"--# Capabilities-mongodb_docker_capabilities_default: []-mongodb_docker_capabilities_custom: []-mongodb_docker_capabilities: "{{ lookup('vars', mongodb_name + '_docker_capabilities_default', default=mongodb_docker_capabilities_default)- + lookup('vars', mongodb_name + '_docker_capabilities_custom', default=mongodb_docker_capabilities_custom) }}"--# Security Opts-mongodb_docker_security_opts_default: []-mongodb_docker_security_opts_custom: []-mongodb_docker_security_opts: "{{ lookup('vars', mongodb_name + '_docker_security_opts_default', default=mongodb_docker_security_opts_default)- + lookup('vars', mongodb_name + '_docker_security_opts_custom', default=mongodb_docker_security_opts_custom) }}"-+mongodb_role_docker_networks_alias: "{{ mongodb_name }}"+mongodb_role_docker_networks_default: []+mongodb_role_docker_networks_custom: []+mongodb_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', 
role='mongodb')+ + lookup('role_var', '_docker_networks_custom', role='mongodb') }}" # Restart Policy-mongodb_docker_restart_policy: unless-stopped+mongodb_role_docker_restart_policy: unless-stopped # State-mongodb_docker_state: started+mongodb_role_docker_state: started # User-mongodb_docker_user: "{{ uid }}:{{ gid }}"+mongodb_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/motd/defaults/main.yml
@@ -14,3 +14,81 @@ motd_use_python: true motd_cli_path: "/usr/local/bin/sb" motd_cli_flags: "--all --title 'Saltbox' --font ivrit --type parchment"++################################+# Service Definitions+################################++# Service types:+# - multi_instance_apikey: Multiple instances with API key (sonarr, radarr, lidarr, readarr, overseerr)+# - multi_instance_token: Multiple instances with token (plex)+# - single_instance_apikey: Single instance with API key (sabnzbd)+# - single_instance_userpass: Single instance with username/password (nzbget)+# - multi_instance_userpass: Multiple instances with username/password (qbittorrent)+# - disabled: Statically disabled services (jellyfin, emby, rtorrent)++motd_services:+ - name: colors+ type: colors+ - name: emby+ type: multi_instance_token+ check_field: token+ output_field: token+ - name: jellyfin+ type: multi_instance_token+ check_field: token+ output_field: token+ - name: lidarr+ type: multi_instance_apikey+ check_field: api_key+ output_field: apikey+ - name: nzbget+ type: single_instance_userpass+ check_field: username+ - name: plex+ type: multi_instance_token+ check_field: token+ output_field: token+ - name: qbittorrent+ type: multi_instance_userpass_info+ check_field: username+ - name: radarr+ type: multi_instance_apikey+ check_field: api_key+ output_field: apikey+ - name: rutorrent+ type: multi_instance_userpass+ instances_var: rutorrent_instances+ url_suffix: "/RPC2"+ - name: sabnzbd+ type: single_instance_apikey+ check_field: api_key+ output_field: apikey+ - name: sonarr+ type: multi_instance_apikey+ check_field: api_key+ output_field: apikey+ - name: systemd+ type: systemd++motd_colors:+ text:+ label: "#FF79D0"+ value: "#12C78F"+ app_name: "#BF976F"+ status:+ warning: "#F5EF34"+ success: "#12C78F"+ error: "#EB4268"+ progress_bar:+ low: "#A8CC8C"+ high: "#DBAB79"+ critical: "#E88388"++motd_systemd:+ enabled: true+ additional_services: []+ display_names: {}++motd_config_mapping:+ rutorrent: rtorrent
modified
roles/motd/tasks/cli_motd.yml
@@ -18,7 +18,26 @@ - sonarr - radarr - lidarr- - readarr+ - nzbget+ - sabnzbd++ - name: Build emby_info dict+ ansible.builtin.include_tasks: subtasks/emby_info.yml+ vars:+ emby_name: "{{ item }}"+ loop: "{{ emby_instances | default(['emby']) }}"++ - name: Build jellyfin_info dict+ ansible.builtin.include_tasks: subtasks/jellyfin_info.yml+ vars:+ jellyfin_name: "{{ item }}"+ loop: "{{ jellyfin_instances | default(['jellyfin']) }}"++ - name: Build qbittorrent_info dict+ ansible.builtin.include_tasks: subtasks/qbittorrent_info.yml+ vars:+ qbittorrent_name: "{{ item }}"+ loop: "{{ qbittorrent_instances | default(['qbittorrent']) }}" - name: Import 'motd.yml' template ansible.builtin.template:
modified
roles/motd/tasks/main.yml
@@ -9,7 +9,7 @@ --- - name: Install required packages ansible.builtin.apt:- state: present+ state: latest name: - lsb-release - figlet@@ -19,11 +19,11 @@ - name: Install required packages ansible.builtin.apt:- state: present+ state: latest name: - update-motd - update-notifier-common- when: (ansible_distribution | lower == 'ubuntu')+ when: (ansible_facts['distribution'] | lower == 'ubuntu') - name: Remove existing motd ansible.builtin.file:@@ -44,7 +44,7 @@ mode: "0775" recurse: true with_items:- - /opt/motd+ - "{{ server_appdata_path }}/motd" - name: Check if MOTD news service ansible.builtin.stat:@@ -66,14 +66,14 @@ ansible.builtin.systemd_service: name: motd-news.service state: stopped- enabled: no+ enabled: false masked: true - name: Disable MOTD news timer ansible.builtin.systemd_service: name: motd-news.timer state: stopped- enabled: no+ enabled: false masked: true - name: Create dynamic motd directory@@ -98,7 +98,7 @@ - name: Install 'openssh-server' ansible.builtin.apt: name: openssh-server- state: present+ state: latest when: (not sshd_config.stat.exists) - name: Wait for '/etc/ssh/sshd_config' to be created
modified
roles/motd/tasks/python_motd.yml
@@ -10,7 +10,7 @@ - name: Clone Saltbox MOTD ansible.builtin.git: repo: https://github.com/saltyorg/motd.git- dest: /opt/motd+ dest: "{{ server_appdata_path }}/motd" clone: true version: HEAD force: true@@ -19,13 +19,13 @@ - name: Check if 'config.json' exists ansible.builtin.stat:- path: "/opt/motd/config.json"+ path: "{{ server_appdata_path }}/motd/config.json" register: motd_config - name: Import default 'config.json' ansible.builtin.template: src: config.json.j2- dest: /opt/motd/config.json+ dest: "{{ server_appdata_path }}/motd/config.json" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"@@ -36,29 +36,29 @@ when: motd_config.stat.exists block: - name: Check value of motd banner- ansible.builtin.shell: "jq '.motd.banner_title' /opt/motd/config.json"+ ansible.builtin.shell: "jq '.motd.banner_title' {{ server_appdata_path }}/motd/config.json" register: motd_banner - name: Migrate value of motd banner ansible.builtin.shell: |- jq '.motd.banner_title = "Saltbox"' /opt/motd/config.json | sponge /opt/motd/config.json+ jq '.motd.banner_title = "Saltbox"' {{ server_appdata_path }}/motd/config.json | sponge {{ server_appdata_path }}/motd/config.json when: ('Cloudbox' in motd_banner.stdout) - name: Config path migration when: motd_config.stat.exists block: - name: Check value of disk path- ansible.builtin.shell: "jq '.disk.path' /opt/motd/config.json"+ ansible.builtin.shell: "jq '.disk.path' {{ server_appdata_path }}/motd/config.json" register: disk_path - name: Migrate value of disk path # noqa jinja[spacing] ansible.builtin.shell: |- jq --arg disk / '. + {disk: {path: $disk}}' /opt/motd/config.json | sponge /opt/motd/config.json+ jq --arg disk / '. 
+ {disk: {path: $disk}}' {{ server_appdata_path }}/motd/config.json | sponge {{ server_appdata_path }}/motd/config.json when: (disk_path.stdout == 'null') -- name: Touch '/opt/motd/activity.log' file+- name: Touch '{{ server_appdata_path }}/motd/activity.log' file ansible.builtin.file:- path: /opt/motd/activity.log+ path: "{{ server_appdata_path }}/motd/activity.log" state: touch owner: "{{ user.name }}" group: "{{ user.name }}"@@ -66,14 +66,14 @@ - name: Set 'saltbox-motd.py' as executable ansible.builtin.file:- path: /opt/motd/saltbox-motd.py+ path: "{{ server_appdata_path }}/motd/saltbox-motd.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: a+x - name: Import dynamic motd files ansible.builtin.copy:- src: /opt/motd/motd/+ src: "{{ server_appdata_path }}/motd/motd/" dest: /etc/update-motd.d/ force: true mode: "0775"
modified
roles/motd/templates/motd.yml.j2
@@ -1,33 +1,151 @@ ----{% set services = ['lidarr', 'plex', 'radarr', 'readarr', 'sonarr'] %}-{% for service in services %}-{% set service_info_var = service + '_info' %}+{% for service in motd_services %}+{% set service_name = service.name %}+{% set service_info_var = service_name + '_info' %} {% set installed_instances = [] %}-{% if vars[service_info_var] is defined %}-{% for instance_name, instance_data in vars[service_info_var].items() %}-{% if service == 'plex' %}-{% if instance_data.token != 'not installed' %}-{% set _ = installed_instances.append({'name': instance_data.name, 'url': instance_data.url, 'token': instance_data.token}) %}-{% endif %}-{% else %}-{% if instance_data.api_key != 'not installed' %}-{% set _ = installed_instances.append({'name': instance_data.name, 'url': instance_data.url, 'apikey': instance_data.api_key}) %}-{% endif %}+{# Multi-instance services with API key or token #}+{% if service.type in ['multi_instance_apikey', 'multi_instance_token'] %}+{% set has_installed = [] %}+{% if lookup('vars', service_info_var, default=None) is not none %}+{% for instance_name, instance_data in lookup('vars', service_info_var).items() %}+{% set instance_entry = {'name': instance_data.name, 'url': instance_data.url} %}+{% set _ = instance_entry.update({service.output_field: instance_data[service.check_field]}) %}+{% set _ = installed_instances.append(instance_entry) %}+{% if instance_data[service.check_field] != 'not installed' %}+{% set _ = has_installed.append(true) %} {% endif %} {% endfor %} {% endif %} {% if installed_instances %}-{{ service }}:+{{ service_name }}:+ enabled: {{ (has_installed | length > 0) | lower }}+ instances: {% for instance in installed_instances %}- - name: {{ instance.name }}- url: {{ instance.url }}-{% if service == 'plex' %}- token: {{ instance.token }}+ - name: {{ instance.name }}+ url: {{ instance.url }}+ {{ service.output_field }}: {{ instance[service.output_field] if instance[service.output_field] != 'not installed' 
else 'your-' + service.output_field + '-here' }}+ enabled: true+{% endfor %} {% else %}- apikey: {{ instance.apikey }}+{{ service_name }}:+ enabled: false+ instances:+ - name: {{ service_name | capitalize }}+ url: http://localhost:8080+ {{ service.output_field }}: your-{{ service.output_field }}-here+ enabled: true+{% endif %}+{# Single-instance services with API key #}+{% elif service.type == 'single_instance_apikey' %}+{% set service_info = lookup('vars', service_info_var, default=None) %}+{% if service_info is not none and service_info[service.check_field] != 'not installed' %}+{{ service_name }}:+ enabled: true+ instances:+ - name: {{ service_info.name }}+ url: {{ service_info.url }}+ {{ service.output_field }}: {{ service_info[service.check_field] }}+ enabled: true+{% else %}+{{ service_name }}:+ enabled: false+ instances:+ - name: {{ service_name | capitalize }}+ url: http://localhost:8080+ {{ service.output_field }}: your-{{ service.output_field }}-here+ enabled: true+{% endif %}+{# Single-instance services with username/password #}+{% elif service.type == 'single_instance_userpass' %}+{% set service_info = lookup('vars', service_info_var, default=None) %}+{% if service_info is not none and service_info[service.check_field] != 'not installed' %}+{{ service_name }}:+ enabled: true+ instances:+ - name: {{ service_info.name }}+ url: {{ service_info.url }}+ user: {{ service_info.username }}+ password: {{ service_info.password }}+ enabled: true+{% else %}+{{ service_name }}:+ enabled: false+ instances:+ - name: {{ service_name | capitalize }}+ url: http://localhost:6789+ user: {{ service_name }}+ password: your-password-here+ enabled: true+{% endif %}+{# Multi-instance services with username/password using info dict (e.g., qbittorrent) #}+{% elif service.type == 'multi_instance_userpass_info' %}+{% if lookup('vars', service_info_var, default=None) is not none %}+{% for instance_name, instance_data in lookup('vars', service_info_var).items() %}+{% if 
instance_data[service.check_field] != 'not installed' %}+{% set instance_entry = {'name': instance_data.name, 'url': instance_data.url, 'username': instance_data.username, 'password': instance_data.password} %}+{% set _ = installed_instances.append(instance_entry) %} {% endif %} {% endfor %}+{% endif %}+{% if installed_instances %}+{{ service_name }}:+ enabled: true+ instances:+{% for instance in installed_instances %}+ - name: {{ instance.name }}+ url: {{ instance.url }}+ user: {{ instance.username }}+ password: {{ instance.password }}+ enabled: true+{% endfor %} {% else %}-{{ service }}: []+{{ service_name }}:+ enabled: false+ instances:+ - name: {{ service_name | capitalize }}+ url: http://localhost:8080+ user: your-username-here+ password: your-password-here+ enabled: true+{% endif %}+{# Multi-instance services with username/password using direct lookup (e.g., rutorrent) #}+{% elif service.type == 'multi_instance_userpass' %}+{% set config_key = motd_config_mapping[service_name] | default(service_name) %}+{% set instances_list = lookup('vars', service.instances_var, default=[service_name]) %}+{% set url_suffix = service.url_suffix | default('') %}+{{ config_key }}:+ enabled: true+ instances:+{% for instance in instances_list %}+ - name: {{ instance }}+ url: {{ lookup('role_var', '_web_url', role=instance) + url_suffix }}+ user: {{ user.name }}+ password: {{ user.pass }}+ enabled: true+{% endfor %}+{# Systemd service #}+{% elif service.type == 'systemd' %}+{{ service_name }}:+ enabled: {{ motd_systemd.enabled | lower }}+ additional_services: {{ motd_systemd.additional_services | to_json }}+ display_names: {{ motd_systemd.display_names | to_json }}+{# Colors #}+{% elif service.type == 'colors' %}+{{ service_name }}:+ text:+ label: "{{ motd_colors.text.label }}"+ value: "{{ motd_colors.text.value }}"+ app_name: "{{ motd_colors.text.app_name }}"+ status:+ warning: "{{ motd_colors.status.warning }}"+ success: "{{ motd_colors.status.success }}"+ error: "{{ 
motd_colors.status.error }}"+ progress_bar:+ low: "{{ motd_colors.progress_bar.low }}"+ high: "{{ motd_colors.progress_bar.high }}"+ critical: "{{ motd_colors.progress_bar.critical }}"+{% endif %}+{% if not loop.last %}+ {% endif %} {% endfor %}
modified
roles/mount_templates/tasks/main.yml
@@ -16,12 +16,12 @@ mode: "0775" recurse: true with_items:- - /opt/mount-templates+ - "{{ server_appdata_path }}/mount-templates" - name: Clone mount-templates repo 'HEAD' ansible.builtin.git: repo: https://github.com/saltyorg/mount-templates.git- dest: /opt/mount-templates+ dest: "{{ server_appdata_path }}/mount-templates" clone: true version: HEAD force: true@@ -33,7 +33,7 @@ - name: Clone mount-templates repo 'main' ansible.builtin.git: repo: https://github.com/saltyorg/mount-templates.git- dest: /opt/mount-templates+ dest: "{{ server_appdata_path }}/mount-templates" clone: true version: main force: true
modified
roles/netdata/defaults/main.yml
@@ -17,100 +17,88 @@ # Settings ################################ -netdata_claim_token: ""-netdata_claim_url: ""-netdata_claim_room: ""+netdata_role_claim_token: ""+netdata_role_claim_url: ""+netdata_role_claim_room: "" ################################ # Docker Socket Proxy ################################ -netdata_docker_socket_proxy_envs:+netdata_role_docker_socket_proxy_envs: CONTAINERS: "1" ################################ # Paths ################################ -netdata_paths_folder: "{{ netdata_name }}"-netdata_paths_location: "{{ server_appdata_path }}/{{ netdata_paths_folder }}"-netdata_paths_config_location: "{{ netdata_paths_location }}/config"-netdata_paths_config_file_location: "{{ netdata_paths_location }}/config/netdata.conf"-netdata_paths_folders_list:- - "{{ netdata_paths_location }}"- - "{{ netdata_paths_location }}/config"+netdata_role_paths_folder: "{{ netdata_name }}"+netdata_role_paths_location: "{{ server_appdata_path }}/{{ netdata_role_paths_folder }}"+netdata_role_paths_config_location: "{{ netdata_role_paths_location }}/config"+netdata_role_paths_config_file_location: "{{ netdata_role_paths_location }}/config/netdata.conf"+netdata_role_paths_folders_list:+ - "{{ netdata_role_paths_location }}"+ - "{{ netdata_role_paths_location }}/config" ################################ # Web ################################ -netdata_web_subdomain: "{{ netdata_name }}"-netdata_web_domain: "{{ user.domain }}"-netdata_web_port: "19999"-netdata_web_url: "{{ 'https://' + (netdata_web_subdomain + '.' + netdata_web_domain- if (netdata_web_subdomain | length > 0)- else netdata_web_domain) }}"+netdata_role_web_subdomain: "{{ netdata_name }}"+netdata_role_web_domain: "{{ user.domain }}"+netdata_role_web_port: "19999"+netdata_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='netdata') + '.' 
+ lookup('role_var', '_web_domain', role='netdata')+ if (lookup('role_var', '_web_subdomain', role='netdata') | length > 0)+ else lookup('role_var', '_web_domain', role='netdata')) }}" ################################ # DNS ################################ -netdata_dns_record: "{{ netdata_web_subdomain }}"-netdata_dns_zone: "{{ netdata_web_domain }}"-netdata_dns_proxy: "{{ dns.proxied }}"+netdata_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='netdata') }}"+netdata_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='netdata') }}"+netdata_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -netdata_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-netdata_traefik_middleware_default: "{{ traefik_default_middleware }}"-netdata_traefik_middleware_custom: ""-netdata_traefik_certresolver: "{{ traefik_default_certresolver }}"-netdata_traefik_enabled: true-netdata_traefik_api_enabled: false-netdata_traefik_api_endpoint: ""+netdata_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+netdata_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+netdata_role_traefik_middleware_custom: ""+netdata_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+netdata_role_traefik_enabled: true+netdata_role_traefik_api_enabled: false+netdata_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-netdata_docker_container: "{{ netdata_name }}"+netdata_role_docker_container: "{{ netdata_name }}" # Image-netdata_docker_image_pull: true-netdata_docker_image_tag: "latest"-netdata_docker_image: "netdata/netdata:{{ netdata_docker_image_tag }}"--# Ports-netdata_docker_ports_defaults: []-netdata_docker_ports_custom: []-netdata_docker_ports: "{{ netdata_docker_ports_defaults- + netdata_docker_ports_custom }}"+netdata_role_docker_image_pull: true+netdata_role_docker_image_repo: 
"netdata/netdata"+netdata_role_docker_image_tag: "latest"+netdata_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='netdata') }}:{{ lookup('role_var', '_docker_image_tag', role='netdata') }}" # Envs-netdata_docker_envs_default:+netdata_role_docker_envs_default: PGID: "{{ gid }}" TZ: "{{ tz }}" DOCKER_HOST: "{{ netdata_name }}-docker-socket-proxy:2375"- NETDATA_CLAIM_TOKEN: "{{ netdata_claim_token }}"- NETDATA_CLAIM_URL: "{{ netdata_claim_url }}"- NETDATA_CLAIM_ROOMS: "{{ netdata_claim_room }}"-netdata_docker_envs_custom: {}-netdata_docker_envs: "{{ netdata_docker_envs_default- | combine(netdata_docker_envs_custom) }}"--# Commands-netdata_docker_commands_default: []-netdata_docker_commands_custom: []-netdata_docker_commands: "{{ netdata_docker_commands_default- + netdata_docker_commands_custom }}"+ NETDATA_CLAIM_TOKEN: "{{ lookup('role_var', '_claim_token', role='netdata') }}"+ NETDATA_CLAIM_URL: "{{ lookup('role_var', '_claim_url', role='netdata') }}"+ NETDATA_CLAIM_ROOMS: "{{ lookup('role_var', '_claim_room', role='netdata') }}"+netdata_role_docker_envs_custom: {}+netdata_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='netdata')+ | combine(lookup('role_var', '_docker_envs_custom', role='netdata')) }}" # Volumes-netdata_docker_volumes_global: false-netdata_docker_volumes_default:- - "{{ netdata_paths_location }}/config:/etc/netdata"+netdata_role_docker_volumes_default:+ - "{{ netdata_role_paths_location }}/config:/etc/netdata" - "netdatalib:/var/lib/netdata" - "netdatacache:/var/cache/netdata" - "/etc/passwd:/host/etc/passwd:ro"@@ -120,66 +108,52 @@ - "/etc/os-release:/host/etc/os-release:ro" - "/var/log:/host/var/log:ro" - "/run/dbus:/run/dbus:ro"-netdata_docker_volumes_custom: []-netdata_docker_volumes: "{{ netdata_docker_volumes_default- + netdata_docker_volumes_custom }}"--# Devices-netdata_docker_devices_default: []-netdata_docker_devices_custom: []-netdata_docker_devices: "{{ netdata_docker_devices_default- + 
netdata_docker_devices_custom }}"--# Hosts-netdata_docker_hosts_default: {}-netdata_docker_hosts_custom: {}-netdata_docker_hosts: "{{ docker_hosts_common- | combine(netdata_docker_hosts_default)- | combine(netdata_docker_hosts_custom) }}"+netdata_role_docker_volumes_custom: []+netdata_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='netdata')+ + lookup('role_var', '_docker_volumes_custom', role='netdata') }}" # Labels-netdata_docker_labels_default: {}-netdata_docker_labels_custom: {}-netdata_docker_labels: "{{ docker_labels_common- | combine(netdata_docker_labels_default)- | combine(netdata_docker_labels_custom) }}"+netdata_role_docker_labels_default: {}+netdata_role_docker_labels_custom: {}+netdata_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='netdata')+ | combine(lookup('role_var', '_docker_labels_custom', role='netdata')) }}" # Hostname-netdata_docker_hostname: "{{ netdata_name }}"+netdata_role_docker_hostname: "{{ netdata_name }}" # Networks-netdata_docker_networks_alias: "{{ netdata_name }}"-netdata_docker_networks_default: []-netdata_docker_networks_custom: []-netdata_docker_networks: "{{ docker_networks_common- + netdata_docker_networks_default- + netdata_docker_networks_custom }}"+netdata_role_docker_networks_alias: "{{ netdata_name }}"+netdata_role_docker_networks_default: []+netdata_role_docker_networks_custom: []+netdata_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='netdata')+ + lookup('role_var', '_docker_networks_custom', role='netdata') }}" # Capabilities-netdata_docker_capabilities_default:+netdata_role_docker_capabilities_default: - SYS_PTRACE - SYS_ADMIN-netdata_docker_capabilities_custom: []-netdata_docker_capabilities: "{{ netdata_docker_capabilities_default- + netdata_docker_capabilities_custom }}"+netdata_role_docker_capabilities_custom: []+netdata_role_docker_capabilities: "{{ lookup('role_var', '_docker_capabilities_default', 
role='netdata')+ + lookup('role_var', '_docker_capabilities_custom', role='netdata') }}" # Security Opts-netdata_docker_security_opts_default:+netdata_role_docker_security_opts_default: - apparmor=unconfined-netdata_docker_security_opts_custom: []-netdata_docker_security_opts: "{{ netdata_docker_security_opts_default- + netdata_docker_security_opts_custom }}"+netdata_role_docker_security_opts_custom: []+netdata_role_docker_security_opts: "{{ lookup('role_var', '_docker_security_opts_default', role='netdata')+ + lookup('role_var', '_docker_security_opts_custom', role='netdata') }}" # Restart Policy-netdata_docker_restart_policy: unless-stopped+netdata_role_docker_restart_policy: unless-stopped # State-netdata_docker_state: started+netdata_role_docker_state: started # PID Mode-netdata_docker_pid_mode: "host"+netdata_role_docker_pid_mode: "host" # Dependencies-netdata_depends_on: "{{ netdata_name }}-docker-socket-proxy"-netdata_depends_on_delay: "0"-netdata_depends_on_healthchecks: "false"+netdata_role_depends_on: "{{ netdata_name }}-docker-socket-proxy"+netdata_role_depends_on_delay: "0"+netdata_role_depends_on_healthchecks: "false"
modified
roles/netdata/tasks/main.yml
@@ -12,28 +12,28 @@ name: docker_socket_proxy vars: docker_socket_proxy_name: "{{ netdata_name }}-docker-socket-proxy"- docker_socket_proxy_docker_envs_custom: "{{ netdata_docker_socket_proxy_envs }}"+ docker_socket_proxy_role_docker_envs_custom: "{{ lookup('role_var', '_docker_socket_proxy_envs', role='netdata') }}" - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" -- name: Check if '{{ netdata_paths_config_file_location }}' exists+- name: Check if '{{ netdata_role_paths_config_file_location }}' exists ansible.builtin.stat:- path: "{{ netdata_paths_config_file_location }}"+ path: "{{ netdata_role_paths_config_file_location }}" register: netdata_config - name: Create directories ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Settings Task- ansible.builtin.import_tasks: "subtasks/settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" when: (not netdata_config.stat.exists) - name: Create a volume
modified
roles/netdata/tasks/subtasks/settings.yml
@@ -9,13 +9,13 @@ --- - name: Settings | Copy Default Config if it doesn't exist ansible.builtin.shell: |- docker run -d --name netdata_tmp "{{ netdata_docker_image }}"- docker cp netdata_tmp:/etc/netdata/. "{{ netdata_paths_config_location }}"+ docker run -d --name netdata_tmp "{{ lookup('role_var', '_docker_image', role='netdata') }}"+ docker cp netdata_tmp:/etc/netdata/. "{{ netdata_role_paths_location }}" docker rm -f netdata_tmp - name: Chown directories ansible.builtin.file:- path: "{{ netdata_paths_location }}"+ path: "{{ netdata_role_paths_location }}" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"
modified
roles/nginx/defaults/main.yml
@@ -17,133 +17,84 @@ # Paths ################################ -nginx_paths_folder: "{{ nginx_name }}"-nginx_paths_location: "{{ server_appdata_path }}/{{ nginx_paths_folder }}"-nginx_paths_folders_list:- - "{{ nginx_paths_location }}"+nginx_role_paths_folder: "{{ nginx_name }}"+nginx_role_paths_location: "{{ server_appdata_path }}/{{ nginx_role_paths_folder }}"+nginx_role_paths_folders_list:+ - "{{ nginx_role_paths_location }}" ################################ # Web ################################ -nginx_web_subdomain: "{{ nginx_name }}"-nginx_web_domain: "{{ user.domain }}"-nginx_web_port: "80"-nginx_web_url: "{{ 'https://' + (lookup('vars', nginx_name + '_web_subdomain', default=nginx_web_subdomain) + '.' + lookup('vars', nginx_name + '_web_domain', default=nginx_web_domain)- if (lookup('vars', nginx_name + '_web_subdomain', default=nginx_web_subdomain) | length > 0)- else lookup('vars', nginx_name + '_web_domain', default=nginx_web_domain)) }}"+nginx_role_web_subdomain: "{{ nginx_name }}"+nginx_role_web_domain: "{{ user.domain }}"+nginx_role_web_port: "80"+nginx_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='nginx') + '.' 
+ lookup('role_var', '_web_domain', role='nginx')+ if (lookup('role_var', '_web_subdomain', role='nginx') | length > 0)+ else lookup('role_var', '_web_domain', role='nginx')) }}" ################################ # DNS ################################ -nginx_dns_record: "{{ lookup('vars', nginx_name + '_web_subdomain', default=nginx_web_subdomain) }}"-nginx_dns_zone: "{{ lookup('vars', nginx_name + '_web_domain', default=nginx_web_domain) }}"-nginx_dns_proxy: "{{ dns.proxied }}"+nginx_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='nginx') }}"+nginx_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='nginx') }}"+nginx_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -nginx_traefik_sso_middleware: ""-nginx_traefik_middleware_default: "{{ traefik_default_middleware }}"-nginx_traefik_middleware_custom: ""-nginx_traefik_certresolver: "{{ traefik_default_certresolver }}"-nginx_traefik_enabled: true-nginx_traefik_api_enabled: false-nginx_traefik_api_endpoint: ""+nginx_role_traefik_sso_middleware: ""+nginx_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+nginx_role_traefik_middleware_custom: ""+nginx_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+nginx_role_traefik_enabled: true+nginx_role_traefik_api_enabled: false+nginx_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-nginx_docker_container: "{{ nginx_name }}"+nginx_role_docker_container: "{{ nginx_name }}" # Image-nginx_docker_image_pull: true-nginx_docker_image_repo: "lscr.io/linuxserver/nginx"-nginx_docker_image_tag: "latest"-nginx_docker_image: "{{ lookup('vars', nginx_name + '_docker_image_repo', default=nginx_docker_image_repo)- + ':' + lookup('vars', nginx_name + '_docker_image_tag', default=nginx_docker_image_tag) }}"--# Ports-nginx_docker_ports_defaults: []-nginx_docker_ports_custom: []-nginx_docker_ports: "{{ lookup('vars', 
nginx_name + '_docker_ports_defaults', default=nginx_docker_ports_defaults)- + lookup('vars', nginx_name + '_docker_ports_custom', default=nginx_docker_ports_custom) }}"+nginx_role_docker_image_pull: true+nginx_role_docker_image_repo: "lscr.io/linuxserver/nginx"+nginx_role_docker_image_tag: "latest"+nginx_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='nginx') }}:{{ lookup('role_var', '_docker_image_tag', role='nginx') }}" # Envs-nginx_docker_envs_default:+nginx_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"-nginx_docker_envs_custom: {}-nginx_docker_envs: "{{ lookup('vars', nginx_name + '_docker_envs_default', default=nginx_docker_envs_default)- | combine(lookup('vars', nginx_name + '_docker_envs_custom', default=nginx_docker_envs_custom)) }}"--# Commands-nginx_docker_commands_default: []-nginx_docker_commands_custom: []-nginx_docker_commands: "{{ lookup('vars', nginx_name + '_docker_commands_default', default=nginx_docker_commands_default)- + lookup('vars', nginx_name + '_docker_commands_custom', default=nginx_docker_commands_custom) }}"+nginx_role_docker_envs_custom: {}+nginx_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='nginx')+ | combine(lookup('role_var', '_docker_envs_custom', role='nginx')) }}" # Volumes-nginx_docker_volumes_default:- - "{{ nginx_paths_location }}:/config"-nginx_docker_volumes_custom: []-nginx_docker_volumes: "{{ lookup('vars', nginx_name + '_docker_volumes_default', default=nginx_docker_volumes_default)- + lookup('vars', nginx_name + '_docker_volumes_custom', default=nginx_docker_volumes_custom) }}"--# Devices-nginx_docker_devices_default: []-nginx_docker_devices_custom: []-nginx_docker_devices: "{{ lookup('vars', nginx_name + '_docker_devices_default', default=nginx_docker_devices_default)- + lookup('vars', nginx_name + '_docker_devices_custom', default=nginx_docker_devices_custom) }}"--# Hosts-nginx_docker_hosts_default: {}-nginx_docker_hosts_custom: 
{}-nginx_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', nginx_name + '_docker_hosts_default', default=nginx_docker_hosts_default))- | combine(lookup('vars', nginx_name + '_docker_hosts_custom', default=nginx_docker_hosts_custom)) }}"--# Labels-nginx_docker_labels_default: {}-nginx_docker_labels_custom: {}-nginx_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', nginx_name + '_docker_labels_default', default=nginx_docker_labels_default))- | combine(lookup('vars', nginx_name + '_docker_labels_custom', default=nginx_docker_labels_custom)) }}"+nginx_role_docker_volumes_default:+ - "{{ nginx_role_paths_location }}:/config"+nginx_role_docker_volumes_custom: []+nginx_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='nginx')+ + lookup('role_var', '_docker_volumes_custom', role='nginx') }}" # Hostname-nginx_docker_hostname: "{{ nginx_name }}"--# Network Mode-nginx_docker_network_mode_default: "{{ docker_networks_name_common }}"-nginx_docker_network_mode: "{{ lookup('vars', nginx_name + '_docker_network_mode_default', default=nginx_docker_network_mode_default) }}"+nginx_role_docker_hostname: "{{ nginx_name }}" # Networks-nginx_docker_networks_alias: "{{ nginx_name }}"-nginx_docker_networks_default: []-nginx_docker_networks_custom: []-nginx_docker_networks: "{{ docker_networks_common- + lookup('vars', nginx_name + '_docker_networks_default', default=nginx_docker_networks_default)- + lookup('vars', nginx_name + '_docker_networks_custom', default=nginx_docker_networks_custom) }}"--# Capabilities-nginx_docker_capabilities_default: []-nginx_docker_capabilities_custom: []-nginx_docker_capabilities: "{{ lookup('vars', nginx_name + '_docker_capabilities_default', default=nginx_docker_capabilities_default)- + lookup('vars', nginx_name + '_docker_capabilities_custom', default=nginx_docker_capabilities_custom) }}"--# Security Opts-nginx_docker_security_opts_default: []-nginx_docker_security_opts_custom: 
[]-nginx_docker_security_opts: "{{ lookup('vars', nginx_name + '_docker_security_opts_default', default=nginx_docker_security_opts_default)- + lookup('vars', nginx_name + '_docker_security_opts_custom', default=nginx_docker_security_opts_custom) }}"+nginx_role_docker_networks_alias: "{{ nginx_name }}"+nginx_role_docker_networks_default: []+nginx_role_docker_networks_custom: []+nginx_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='nginx')+ + lookup('role_var', '_docker_networks_custom', role='nginx') }}" # Restart Policy-nginx_docker_restart_policy: unless-stopped+nginx_role_docker_restart_policy: unless-stopped # State-nginx_docker_state: started+nginx_role_docker_state: started
modified
roles/nginx/tasks/main2.yml
@@ -11,9 +11,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/node_exporter/defaults/main.yml
@@ -7,7 +7,7 @@ # GNU General Public License v3.0 # ########################################################################## ----node_exporter_path: "/opt/node_exporter"+node_exporter_path: "{{ server_appdata_path }}/node_exporter" node_exporter_latest_releases_url: "{{ svm }}https://api.github.com/repos/prometheus/node_exporter/releases/latest"
modified
roles/notify/tasks/main.yml
@@ -8,7 +8,7 @@ ######################################################################### --- - name: Import Variables Task- ansible.builtin.import_tasks: "subtasks/variables.yml"+ ansible.builtin.include_tasks: "subtasks/variables.yml" - name: Sent Notification ansible.builtin.include_tasks: "subtasks/{{ item }}.yml"
modified
roles/nvidia/defaults/main.yml
@@ -15,7 +15,7 @@ nvidia_enabled: false # Fetches the latest supported driver version supported by the keylase patch when set to "latest"-# If set to "ignore" then driver installation will be ignored and only patching will run.+# If set to "ignore" then driver installation and patching will be skipped. # Otherwise specify a valid driver like this (it being a quoted string is important): # nvidia_driver_version: "555.58.02" nvidia_driver_version: "latest"@@ -41,7 +41,7 @@ nvidia_patch_download_dest: "/tmp/NVIDIA-patch.sh" # Harcoded in the patch script file-nvidia_patch_backup_file_location: "/opt/nvidia/libnvidia-encode-backup"+nvidia_patch_backup_file_location: "{{ server_appdata_path }}/nvidia/libnvidia-encode-backup" ################################ # Docker
modified
roles/nvidia/tasks/main.yml
@@ -7,175 +7,178 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Install mokutil- ansible.builtin.apt:- name: mokutil- state: latest+- name: Nvidia Tasks+ when: (nvidia_driver_version | lower != 'ignore')+ block:+ - name: Install mokutil+ ansible.builtin.apt:+ name: mokutil+ state: latest -- name: Check if Secure Boot is enabled- ansible.builtin.shell: "mokutil --sb-state | grep -q 'SecureBoot enabled'"- register: secure_boot_enabled- changed_when: false- failed_when: false+ - name: Check if Secure Boot is enabled+ ansible.builtin.shell: "mokutil --sb-state | grep -q 'SecureBoot enabled'"+ register: secure_boot_enabled+ changed_when: false+ failed_when: false -- name: Fail if Secure Boot is enabled- ansible.builtin.fail:- msg: "This role does not support SecureBoot enabled systems."- when: secure_boot_enabled.rc == 0+ - name: Fail if Secure Boot is enabled+ ansible.builtin.fail:+ msg: "This role does not support SecureBoot enabled systems."+ when: secure_boot_enabled.rc == 0 -- name: Install pkg-config- ansible.builtin.apt:- name: pkg-config- state: latest+ - name: Install pkg-config+ ansible.builtin.apt:+ name: pkg-config+ state: latest -- name: Fetch all Nvidia GPU IDs- ansible.builtin.shell: lspci | grep -E '.*VGA.*NVIDIA|.*3D controller.*NVIDIA' | cut -d' ' -f 1- register: nvidia_gpu_ids- changed_when: false+ - name: Fetch all Nvidia GPU IDs+ ansible.builtin.shell: lspci | grep -E '.*VGA.*NVIDIA|.*3D controller.*NVIDIA' | cut -d' ' -f 1+ register: nvidia_gpu_ids+ changed_when: false -- name: Check if any NVIDIA GPUs were found- ansible.builtin.set_fact:- nvidia_gpus_found: "{{ nvidia_gpu_ids.stdout_lines | length > 0 }}"+ - name: Check if any NVIDIA GPUs were found+ ansible.builtin.set_fact:+ nvidia_gpus_found: "{{ nvidia_gpu_ids.stdout_lines | length > 0 }}" -- name: Fail if no NVIDIA GPU was detected- ansible.builtin.fail:- msg: "No Nvidia GPUs could be detected."- when: not 
nvidia_gpus_found+ - name: Fail if no NVIDIA GPU was detected+ ansible.builtin.fail:+ msg: "No Nvidia GPUs could be detected."+ when: not nvidia_gpus_found -- name: Nvidia Setup block- when: nvidia_gpus_found- block:- - name: Fetch detailed info for all Nvidia GPUs- ansible.builtin.shell: lspci -s {{ item }}- register: nvidia_gpu_info- changed_when: false- with_items: "{{ nvidia_gpu_ids.stdout_lines }}"+ - name: Nvidia Setup block+ when: nvidia_gpus_found+ block:+ - name: Fetch detailed info for all Nvidia GPUs+ ansible.builtin.shell: lspci -s {{ item }}+ register: nvidia_gpu_info+ changed_when: false+ with_items: "{{ nvidia_gpu_ids.stdout_lines }}" - - name: Create a list of all GPU details- ansible.builtin.set_fact:- all_nvidia_gpus: "{{ nvidia_gpu_info.results | map(attribute='stdout') | list }}"+ - name: Create a list of all GPU details+ ansible.builtin.set_fact:+ all_nvidia_gpus: "{{ nvidia_gpu_info.results | map(attribute='stdout') | list }}" - - name: Check if any GeForce GPUs are present- ansible.builtin.set_fact:- geforce_gpu_present: "{{ all_nvidia_gpus | select('search', 'GeForce') | list | length > 0 }}"+ - name: Check if any GeForce GPUs are present+ ansible.builtin.set_fact:+ geforce_gpu_present: "{{ all_nvidia_gpus | select('search', 'GeForce') | list | length > 0 }}" - - name: Display detected NVIDIA GPUs- ansible.builtin.debug:- msg:- - "Detected {{ all_nvidia_gpus | length }} NVIDIA GPU(s):"- - "{{ all_nvidia_gpus }}"- - "GeForce GPU present: {{ geforce_gpu_present | default(false) }}"-- - name: Fetch NVIDIA driver patch script- ansible.builtin.uri:- url: "{{ nvidia_patch_download_url }}"- return_content: yes- register: patch_script-- - name: Extract supported driver versions- ansible.builtin.set_fact:- supported_versions: >-- {{ patch_script.content | regex_findall('\[\"([0-9.]+)\"\]') | map('regex_replace', '\"', '') | unique | sort }}-- - name: Latest supported version- ansible.builtin.debug:- msg: "Latest supported version is {{ 
supported_versions[-1] }}"-- - name: Define latest version- ansible.builtin.set_fact:- nvidia_driver_version: "{{ supported_versions[-1] }}"- when: nvidia_driver_version == "latest"-- - name: Check for incompatible version (GeForce patching)- ansible.builtin.fail:- msg: "Driver version: {{ nvidia_driver_version }} is not supported by the keylase patch."- when: geforce_gpu_present and (nvidia_driver_version != "latest") and nvidia_driver_version not in supported_versions-- - name: Get nvidia-smi driver version- ansible.builtin.shell: "nvidia-smi --query-gpu=driver_version --format=csv,noheader"- register: nvidia_smi_output- ignore_errors: yes-- - name: Set 'nvidia_installed_driver_version' variable- ansible.builtin.set_fact:- nvidia_installed_driver_version: "{{ nvidia_smi_output.stdout_lines[0] | default('')- if nvidia_smi_output.rc == 0- else '' }}"-- - name: Print driver state- ansible.builtin.debug:- msg:- - "Desired driver version: {{ nvidia_driver_version }}"- - "Current driver version: {{ nvidia_installed_driver_version- if (nvidia_installed_driver_version | length > 0)- else 'not installed' }}"-- - name: Nvidia Purge Drivers Tasks- when: ('nvidia-purge' in ansible_run_tags) or ((nvidia_installed_driver_version != nvidia_driver_version) and (nvidia_installed_driver_version | length > 0) and ('nvidia' in ansible_run_tags) and (nvidia_driver_version | lower != 'ignore'))- block:- - name: Purge Nvidia APT packages- ansible.builtin.shell: "dpkg --force-depends -P $(dpkg -l | grep nvidia | awk '{print $2}')"- register: dpkg_purge_output- ignore_errors: yes-- - name: Remove dependencies that are no longer required- ansible.builtin.apt:- autoremove: yes-- - name: Success message+ - name: Display detected NVIDIA GPUs ansible.builtin.debug: msg:- - "Purged Nvidia drivers. 
System will now reboot."- - "You will need to re-run your previous tag after the system has rebooted."- when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)+ - "Detected {{ all_nvidia_gpus | length }} NVIDIA GPU(s):"+ - "{{ all_nvidia_gpus }}"+ - "GeForce GPU present: {{ geforce_gpu_present | default(false) }}" - - name: Prompt user before continuing- ansible.builtin.pause:- prompt: "Read the instructions above and press enter when you have done so"- when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)+ - name: Fetch NVIDIA driver patch script+ ansible.builtin.uri:+ url: "{{ nvidia_patch_download_url }}"+ return_content: true+ register: patch_script - - name: Reboot- ansible.builtin.shell: reboot- when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)+ - name: Extract supported driver versions+ ansible.builtin.set_fact:+ supported_versions: >-+ {{ patch_script.content | regex_findall('\[\"([0-9.]+)\"\]') | map('regex_replace', '\"', '') | unique | sort }} - - name: Check if 'blacklist-nouveau.conf' exists- ansible.builtin.stat:- path: "{{ nvidia_kernel_blacklist_nouveau_conf_location }}"- register: blacklist_nouveau_conf+ - name: Latest supported version+ ansible.builtin.debug:+ msg: "Latest supported version is {{ supported_versions[-1] }}" - - name: "Nvidia Kernel Task"- ansible.builtin.include_tasks: "subtasks/kernel.yml"- when:- - (not blacklist_nouveau_conf.stat.exists)- - nvidia_kernel_blacklist_nouveau+ - name: Define latest version+ ansible.builtin.set_fact:+ nvidia_driver_version: "{{ supported_versions[-1] }}"+ when: nvidia_driver_version == "latest" - - name: Nvidia Driver Tasks- when: ((nvidia_installed_driver_version != nvidia_driver_version) and (nvidia_driver_version | lower != 'ignore') and ('nvidia' in ansible_run_tags)) or (nvidia_installed_driver_version | length == 0)- block:- - name: "Nvidia Driver Task"- ansible.builtin.include_tasks: "subtasks/driver.yml"+ - name: Check 
for incompatible version (GeForce patching)+ ansible.builtin.fail:+ msg: "Driver version: {{ nvidia_driver_version }} is not supported by the keylase patch."+ when: geforce_gpu_present and (nvidia_driver_version != "latest") and nvidia_driver_version not in supported_versions - - name: "Cleanup Patch backup"- ansible.builtin.file:- path: "{{ nvidia_patch_backup_file_location }}"- state: absent+ - name: Get nvidia-smi driver version+ ansible.builtin.shell: "nvidia-smi --query-gpu=driver_version --format=csv,noheader"+ register: nvidia_smi_output+ ignore_errors: true - - name: Nvidia Driver Patch Tasks- when: geforce_gpu_present- block:- - name: Check to see if patch backup files exist+ - name: Set 'nvidia_installed_driver_version' variable+ ansible.builtin.set_fact:+ nvidia_installed_driver_version: "{{ nvidia_smi_output.stdout_lines[0] | default('')+ if nvidia_smi_output.rc == 0+ else '' }}"++ - name: Print driver state+ ansible.builtin.debug:+ msg:+ - "Desired driver version: {{ nvidia_driver_version }}"+ - "Current driver version: {{ nvidia_installed_driver_version+ if (nvidia_installed_driver_version | length > 0)+ else 'not installed' }}"++ - name: Nvidia Purge Drivers Tasks+ when: ('nvidia-purge' in ansible_run_tags) or ((nvidia_installed_driver_version != nvidia_driver_version) and (nvidia_installed_driver_version | length > 0) and ('nvidia' in ansible_run_tags))+ block:+ - name: Purge Nvidia APT packages+ ansible.builtin.shell: "dpkg --force-depends -P $(dpkg -l | grep nvidia | awk '{print $2}')"+ register: dpkg_purge_output+ ignore_errors: true++ - name: Remove dependencies that are no longer required+ ansible.builtin.apt:+ autoremove: true++ - name: Success message+ ansible.builtin.debug:+ msg:+ - "Purged Nvidia drivers. 
System will now reboot."+ - "You will need to re-run your previous tag after the system has rebooted."+ when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)++ - name: Prompt user before continuing+ ansible.builtin.pause:+ prompt: "Read the instructions above and press enter when you have done so"+ when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)++ - name: Reboot+ ansible.builtin.shell: reboot+ when: (dpkg_purge_output.stdout | length > 0) and (dpkg_purge_output.rc == 0)++ - name: Check if 'blacklist-nouveau.conf' exists ansible.builtin.stat:- path: "{{ nvidia_patch_backup_file_location }}"- register: nvidia_patch_backup_folder+ path: "{{ nvidia_kernel_blacklist_nouveau_conf_location }}"+ register: blacklist_nouveau_conf - - name: "Nvidia Driver Patch Task"- ansible.builtin.include_tasks: "subtasks/patch.yml"- when: (not nvidia_patch_backup_folder.stat.exists)+ - name: "Nvidia Kernel Task"+ ansible.builtin.include_tasks: "subtasks/kernel.yml"+ when:+ - (not blacklist_nouveau_conf.stat.exists)+ - nvidia_kernel_blacklist_nouveau - - name: "Nvidia Nvtop Task"- ansible.builtin.include_tasks: "subtasks/nvtop.yml"+ - name: Nvidia Driver Tasks+ when: ((nvidia_installed_driver_version != nvidia_driver_version) and ('nvidia' in ansible_run_tags)) or (nvidia_installed_driver_version | length == 0)+ block:+ - name: "Nvidia Driver Task"+ ansible.builtin.include_tasks: "subtasks/driver.yml" - - name: "Install Docker"- ansible.builtin.include_role:- name: "docker"- when: ('nvidia' in ansible_run_tags)+ - name: "Cleanup Patch backup"+ ansible.builtin.file:+ path: "{{ nvidia_patch_backup_file_location }}"+ state: absent++ - name: Nvidia Driver Patch Tasks+ when: geforce_gpu_present+ block:+ - name: Check to see if patch backup files exist+ ansible.builtin.stat:+ path: "{{ nvidia_patch_backup_file_location }}"+ register: nvidia_patch_backup_folder++ - name: "Nvidia Driver Patch Task"+ ansible.builtin.include_tasks: 
"subtasks/patch.yml"+ when: (not nvidia_patch_backup_folder.stat.exists)++ - name: "Nvidia Nvtop Task"+ ansible.builtin.include_tasks: "subtasks/nvtop.yml"++ - name: "Install Docker"+ ansible.builtin.include_role:+ name: "docker"+ when: ('nvidia' in ansible_run_tags)
modified
roles/nvidia/tasks/subtasks/driver.yml
@@ -18,7 +18,7 @@ state: latest - name: Driver | Install gcc12 if kernel is 6.5>= on Jammy- when: (ansible_kernel is version('6.5', '>=')) and (ansible_distribution_version is version('22.04', '=='))+ when: (ansible_facts['kernel'] is version('6.5', '>=')) and (ansible_facts['distribution_version'] is version('22.04', '==')) block: - name: Driver | Install GCC and G++ ansible.builtin.apt:
modified
roles/nvidia/tasks/subtasks/nvtop.yml
@@ -21,7 +21,7 @@ retries: "{{ ansible_retry_count if (not continuous_integration) else ansible_retry_count_ci }}" delay: 10 until: result is succeeded- when: ansible_distribution_release in ['jammy', 'focal']+ when: ansible_facts['distribution_release'] in ['jammy', 'focal'] - name: Nvidia | Remove old repository list ansible.builtin.file:@@ -30,11 +30,11 @@ - name: nvtop | Add flexiondotorg/nvtop repository ansible.builtin.apt_repository:- repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/flexiondotorg.asc] https://ppa.launchpadcontent.net/flexiondotorg/nvtop/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main"+ repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/flexiondotorg.asc] https://ppa.launchpadcontent.net/flexiondotorg/nvtop/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} main" filename: "flexiondotorg" state: present update_cache: true- when: ansible_distribution_release in ['jammy', 'focal']+ when: ansible_facts['distribution_release'] in ['jammy', 'focal'] - name: nvtop | Install nvtop ansible.builtin.apt:
modified
roles/nzbget/defaults/main.yml
@@ -17,51 +17,55 @@ # Paths ################################ -nzbget_paths_folder: "{{ nzbget_name }}"-nzbget_paths_location: "{{ server_appdata_path }}/{{ nzbget_paths_folder }}"-nzbget_paths_downloads_location: "{{ downloads_usenet_path }}/{{ nzbget_paths_folder }}"-nzbget_paths_folders_list:- - "{{ nzbget_paths_location }}"- - "{{ nzbget_paths_downloads_location }}"- - "{{ nzbget_paths_downloads_location }}/completed"- - "{{ nzbget_paths_downloads_location }}/completed/radarr"- - "{{ nzbget_paths_downloads_location }}/completed/sonarr"- - "{{ nzbget_paths_downloads_location }}/completed/lidarr"-nzbget_paths_config_location: "{{ nzbget_paths_location }}/nzbget.conf"+nzbget_role_paths_folder: "{{ nzbget_name }}"+nzbget_role_paths_location: "{{ server_appdata_path }}/{{ nzbget_role_paths_folder }}"+nzbget_role_paths_downloads_location: "{{ downloads_usenet_path }}/{{ nzbget_role_paths_folder }}"+nzbget_role_paths_folders_list:+ - "{{ nzbget_role_paths_location }}"+ - "{{ nzbget_role_paths_downloads_location }}"+ - "{{ nzbget_role_paths_downloads_location }}/completed"+ - "{{ nzbget_role_paths_downloads_location }}/completed/radarr"+ - "{{ nzbget_role_paths_downloads_location }}/completed/sonarr"+ - "{{ nzbget_role_paths_downloads_location }}/completed/lidarr"+nzbget_role_paths_config_location: "{{ nzbget_role_paths_location }}/nzbget.conf" ################################ # Web ################################ -nzbget_web_subdomain: "{{ nzbget_name }}"-nzbget_web_domain: "{{ user.domain }}"-nzbget_web_port: "6789"-nzbget_web_login: "{{ user.name }}:{{ user.pass }}"-nzbget_web_url_with_login: "{{ 'https://' + nzbget_web_login + '@' + nzbget_web_subdomain + '.' 
+ nzbget_web_domain }}"-nzbget_web_local_url_web_login: "{{ 'http://' + nzbget_web_login + '@' + nzbget_name + ':' + nzbget_web_port }}"+nzbget_role_web_subdomain: "{{ nzbget_name }}"+nzbget_role_web_domain: "{{ user.domain }}"+nzbget_role_web_port: "6789"+nzbget_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='nzbget') + '.' + lookup('role_var', '_web_domain', role='nzbget')+ if (lookup('role_var', '_web_subdomain', role='nzbget') | length > 0)+ else lookup('role_var', '_web_domain', role='nzbget')) }}"+nzbget_role_web_login: "{{ user.name }}:{{ user.pass }}"+nzbget_role_web_url_with_login: "{{ 'https://' + lookup('role_var', '_web_login', role='nzbget') + '@' + lookup('role_var', '_web_subdomain', role='nzbget') + '.' + lookup('role_var', '_web_domain', role='nzbget') }}"+nzbget_role_web_local_url: "{{ 'http://' + nzbget_name + ':' + lookup('role_var', '_web_port', role='nzbget') }}"+nzbget_role_web_local_url_web_login: "{{ 'http://' + lookup('role_var', '_web_login', role='nzbget') + '@' + nzbget_name + ':' + lookup('role_var', '_web_port', role='nzbget') }}" ################################ # DNS ################################ -nzbget_dns_record: "{{ nzbget_web_subdomain }}"-nzbget_dns_zone: "{{ nzbget_web_domain }}"-nzbget_dns_proxy: "{{ dns.proxied }}"+nzbget_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='nzbget') }}"+nzbget_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='nzbget') }}"+nzbget_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -nzbget_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-nzbget_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', nzbget_name + '_name', default=nzbget_name)- if (nzbget_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-nzbget_traefik_middleware_custom: ""-nzbget_traefik_certresolver: "{{ traefik_default_certresolver 
}}"-nzbget_traefik_enabled: true-nzbget_traefik_api_enabled: true-nzbget_traefik_api_endpoint: "PathRegexp(`^/[A-Za-z0-9]+:[A-Za-z0-9]+/(xml|json|jsonp)rpc`) || PathRegexp(`^/(xml|json|jsonp)rpc`)"+nzbget_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+nzbget_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + nzbget_name+ if (lookup('role_var', '_themepark_enabled', role='nzbget') and global_themepark_plugin_enabled)+ else '') }}"+nzbget_role_traefik_middleware_custom: ""+nzbget_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+nzbget_role_traefik_enabled: true+nzbget_role_traefik_api_enabled: true+nzbget_role_traefik_api_endpoint: "PathRegexp(`^/[A-Za-z0-9]+:[A-Za-z0-9]+/(xml|json|jsonp)rpc`) || PathRegexp(`^/(xml|json|jsonp)rpc`)" ################################ # Config@@ -69,7 +73,7 @@ # New Installs -nzbget_config_new_installs_settings_default:+nzbget_role_config_new_installs_settings_default: # Authentication - { regexp: '^ControlUsername\s?=.*', line: "ControlUsername={{ user.name }}" } - { regexp: '^ControlPassword\s?=.*', line: "ControlPassword={{ user.pass }}" }@@ -119,13 +123,13 @@ .duplicate1.rar, .srs, .info, .txt, .com, .md5, .png, .1, .url, .jpg, .xxx, .rev, .iso, .img, .ifo, .vob' }-nzbget_config_new_installs_settings_custom: []-nzbget_config_new_installs_settings_list: "{{ nzbget_config_new_installs_settings_default- + nzbget_config_new_installs_settings_custom }}"+nzbget_role_config_new_installs_settings_custom: []+nzbget_role_config_new_installs_settings_list: "{{ lookup('role_var', '_config_new_installs_settings_default', role='nzbget')+ + lookup('role_var', '_config_new_installs_settings_custom', role='nzbget') }}" # Existing Installs -nzbget_config_existing_installs_settings_default:+nzbget_role_config_existing_installs_settings_default: # Logging - { regexp: '^WriteLog\s?=.*', line: 'WriteLog=rotate' } - { regexp: '^RotateLog\s?=.*', line: 'RotateLog=3' }@@ -134,9 +138,9 @@ # 
Unpacking - { regexp: '^UnrarCmd\s?=.*', line: 'UnrarCmd=ionice -c3 /usr/bin/unrar' } - { regexp: '^SevenZipCmd\s?=.*', line: 'SevenZipCmd=ionice -c3 /usr/bin/7z' }-nzbget_config_existing_installs_settings_custom: []-nzbget_config_existing_installs_settings_list: "{{ nzbget_config_existing_installs_settings_default- + nzbget_config_existing_installs_settings_custom }}"+nzbget_role_config_existing_installs_settings_custom: []+nzbget_role_config_existing_installs_settings_list: "{{ lookup('role_var', '_config_existing_installs_settings_default', role='nzbget')+ + lookup('role_var', '_config_existing_installs_settings_custom', role='nzbget') }}" ################################ # Scripts@@ -144,136 +148,99 @@ # Paths # Default nzbget_scripts_paths_location = /opt/scripts/nzbget-nzbget_scripts_paths_location: "{{ server_appdata_path }}/scripts/{{ nzbget_paths_folder }}"-nzbget_scripts_paths_folders_list:- - "{{ nzbget_scripts_paths_location }}"- - "{{ nzbget_scripts_paths_location }}/nzbgetpp"-nzbget_scripts_paths_rarfile_py_location: "{{ nzbget_scripts_paths_location }}/nzbgetpp/rarfile/rarfile.py"+nzbget_role_scripts_paths_location: "{{ server_appdata_path }}/scripts/{{ nzbget_role_paths_folder }}"+nzbget_role_scripts_paths_folders_list:+ - "{{ nzbget_role_scripts_paths_location }}"+ - "{{ nzbget_role_scripts_paths_location }}/nzbgetpp"+nzbget_role_scripts_paths_rarfile_py_location: "{{ nzbget_role_scripts_paths_location }}/nzbgetpp/rarfile/rarfile.py" # Repos Downloaded-nzbget_scripts_repos_default:+nzbget_role_scripts_repos_default: - 'https://github.com/Prinz23/nzbgetpp.git'-nzbget_scripts_repos_custom: []-nzbget_scripts_repos_list: "{{ nzbget_scripts_repos_default + nzbget_scripts_repos_custom }}"+nzbget_role_scripts_repos_custom: []+nzbget_role_scripts_repos_list: "{{ lookup('role_var', '_scripts_repos_default', role='nzbget') + lookup('role_var', '_scripts_repos_custom', role='nzbget') }}" # URLs 
Downloaded-nzbget_scripts_direct_downloads_default:+nzbget_role_scripts_direct_downloads_default: - "https://raw.githubusercontent.com/clinton-hall/GetScripts/master/flatten.py" - "https://raw.githubusercontent.com/clinton-hall/GetScripts/master/DeleteSamples.py" - "https://raw.githubusercontent.com/Prinz23/nzbget-pp-reverse/master/reverse_name.py" - "https://raw.githubusercontent.com/l3uddz/nzbgetScripts/master/HashRenamer.py"-nzbget_scripts_direct_downloads_custom: []-nzbget_scripts_direct_downloads_list: "{{ nzbget_scripts_direct_downloads_default- + nzbget_scripts_direct_downloads_custom }}"+nzbget_role_scripts_direct_downloads_custom: []+nzbget_role_scripts_direct_downloads_list: "{{ lookup('role_var', '_scripts_direct_downloads_default', role='nzbget')+ + lookup('role_var', '_scripts_direct_downloads_custom', role='nzbget') }}" # Locally Copied-nzbget_scripts_local_copy_default: []-nzbget_scripts_local_copy_custom: []-nzbget_scripts_local_copy_list: "{{ nzbget_scripts_local_copy_default- + nzbget_scripts_local_copy_custom }}"--################################-# THEME+nzbget_role_scripts_local_copy_default: []+nzbget_role_scripts_local_copy_custom: []+nzbget_role_scripts_local_copy_list: "{{ lookup('role_var', '_scripts_local_copy_default', role='nzbget')+ + lookup('role_var', '_scripts_local_copy_custom', role='nzbget') }}"++################################+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-nzbget_themepark_enabled: false-nzbget_themepark_app: "nzbget"-nzbget_themepark_theme: "{{ global_themepark_theme }}"-nzbget_themepark_domain: "{{ global_themepark_domain }}"-nzbget_themepark_addons: []+nzbget_role_themepark_enabled: false+nzbget_role_themepark_app: "nzbget"+nzbget_role_themepark_theme: "{{ global_themepark_theme }}"+nzbget_role_themepark_domain: "{{ global_themepark_domain }}"+nzbget_role_themepark_addons: [] ################################ # Docker 
################################ # Container-nzbget_docker_container: "{{ nzbget_name }}"+nzbget_role_docker_container: "{{ nzbget_name }}" # Image-nzbget_docker_image_pull: true-nzbget_docker_image_tag: "release"-nzbget_docker_image: "ghcr.io/hotio/nzbget:{{ nzbget_docker_image_tag }}"--# Ports-nzbget_docker_ports_defaults: []-nzbget_docker_ports_custom: []-nzbget_docker_ports: "{{ nzbget_docker_ports_defaults- + nzbget_docker_ports_custom }}"+nzbget_role_docker_image_pull: true+nzbget_role_docker_image_repo: "ghcr.io/hotio/nzbget"+nzbget_role_docker_image_tag: "release"+nzbget_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='nzbget') }}:{{ lookup('role_var', '_docker_image_tag', role='nzbget') }}" # Envs-nzbget_docker_envs_default:+nzbget_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}" LC_ALL: "C"-nzbget_docker_envs_custom: {}-nzbget_docker_envs: "{{ nzbget_docker_envs_default- | combine(nzbget_docker_envs_custom) }}"--# Commands-nzbget_docker_commands_default: []-nzbget_docker_commands_custom: []-nzbget_docker_commands: "{{ nzbget_docker_commands_default- + nzbget_docker_commands_custom }}"+nzbget_role_docker_envs_custom: {}+nzbget_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='nzbget')+ | combine(lookup('role_var', '_docker_envs_custom', role='nzbget')) }}" # Volumes-nzbget_docker_volumes_default:- - "{{ nzbget_paths_location }}:/config"+nzbget_role_docker_volumes_default:+ - "{{ nzbget_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-nzbget_docker_volumes_custom: []-nzbget_docker_volumes: "{{ nzbget_docker_volumes_default- + nzbget_docker_volumes_custom }}"--# Devices-nzbget_docker_devices_default: []-nzbget_docker_devices_custom: []-nzbget_docker_devices: "{{ nzbget_docker_devices_default- + nzbget_docker_devices_custom }}"--# Hosts-nzbget_docker_hosts_default: {}-nzbget_docker_hosts_custom: {}-nzbget_docker_hosts: "{{ docker_hosts_common- | 
combine(nzbget_docker_hosts_default)- | combine(nzbget_docker_hosts_custom) }}"+nzbget_role_docker_volumes_custom: []+nzbget_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='nzbget')+ + lookup('role_var', '_docker_volumes_custom', role='nzbget') }}" # Labels-nzbget_docker_labels_default: {}-nzbget_docker_labels_custom: {}-nzbget_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', nzbget_name + '_docker_labels_default', default=nzbget_docker_labels_default))- | combine((traefik_themepark_labels- if (nzbget_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', nzbget_name + '_docker_labels_custom', default=nzbget_docker_labels_custom)) }}"+nzbget_role_docker_labels_default: {}+nzbget_role_docker_labels_custom: {}+nzbget_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='nzbget')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='nzbget') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='nzbget')) }}" # Hostname-nzbget_docker_hostname: "{{ nzbget_name }}"+nzbget_role_docker_hostname: "{{ nzbget_name }}" # Networks-nzbget_docker_networks_alias: "{{ nzbget_name }}"-nzbget_docker_networks_default: []-nzbget_docker_networks_custom: []-nzbget_docker_networks: "{{ docker_networks_common- + nzbget_docker_networks_default- + nzbget_docker_networks_custom }}"--# Capabilities-nzbget_docker_capabilities_default: []-nzbget_docker_capabilities_custom: []-nzbget_docker_capabilities: "{{ nzbget_docker_capabilities_default- + nzbget_docker_capabilities_custom }}"--# Security Opts-nzbget_docker_security_opts_default: []-nzbget_docker_security_opts_custom: []-nzbget_docker_security_opts: "{{ nzbget_docker_security_opts_default- + nzbget_docker_security_opts_custom }}"+nzbget_role_docker_networks_alias: "{{ nzbget_name }}"+nzbget_role_docker_networks_default: []+nzbget_role_docker_networks_custom: 
[]+nzbget_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='nzbget')+ + lookup('role_var', '_docker_networks_custom', role='nzbget') }}" # Restart Policy-nzbget_docker_restart_policy: unless-stopped+nzbget_role_docker_restart_policy: unless-stopped # State-nzbget_docker_state: started+nzbget_role_docker_state: started
modified
roles/nzbget/tasks/main.yml
@@ -11,9 +11,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -23,12 +23,12 @@ - name: Check if existing config exists ansible.builtin.stat:- path: "{{ nzbget_paths_config_location }}"+ path: "{{ nzbget_role_paths_config_location }}" register: nzbget_paths_config_location_stat - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml" when: (not continuous_integration)
modified
roles/nzbget/tasks/subtasks/post-install/main.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Wait for config file to be created ansible.builtin.wait_for:- path: "{{ nzbget_paths_config_location }}"+ path: "{{ nzbget_role_paths_config_location }}" state: present - name: Post-Install | Wait for 10 seconds@@ -19,15 +19,15 @@ - name: Post-Install | Stop container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml" vars:- var_prefix: "{{ nzbget_docker_container }}"+ var_prefix: "{{ nzbget_role_docker_container }}" - name: Post-Install | Scripts Task- ansible.builtin.import_tasks: "scripts/main.yml"+ ansible.builtin.include_tasks: "scripts/main.yml" - name: Post-Install | Settings Task- ansible.builtin.import_tasks: "settings/main.yml"+ ansible.builtin.include_tasks: "settings/main.yml" - name: Post-Install | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml" vars:- var_prefix: "{{ nzbget_docker_container }}"+ var_prefix: "{{ nzbget_role_docker_container }}"
modified
roles/nzbget/tasks/subtasks/post-install/scripts/main.yml
@@ -14,54 +14,48 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"- with_items: "{{ nzbget_scripts_paths_folders_list }}"--# Add Scripts+ with_items: "{{ lookup('role_var', '_scripts_paths_folders_list', role='nzbget') }}" - name: Post-Install | Scripts | Download scripts repos ansible.builtin.git: repo: "{{ item }}"- dest: "{{ nzbget_scripts_paths_location }}/{{ (item | basename | splitext)[0] }}"+ dest: "{{ lookup('role_var', '_scripts_paths_location', role='nzbget') }}/{{ (item | basename | splitext)[0] }}" clone: true version: HEAD force: true become: true become_user: "{{ user.name }}"- loop: "{{ nzbget_scripts_repos_list }}"- ignore_errors: true+ loop: "{{ lookup('role_var', '_scripts_repos_list', role='nzbget') }}" - name: Post-Install | Scripts | Download script URLs ansible.builtin.get_url: url: "{{ item }}"- dest: "{{ nzbget_scripts_paths_location }}"+ dest: "{{ lookup('role_var', '_scripts_paths_location', role='nzbget') }}" validate_certs: false force: true owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"- loop: "{{ nzbget_scripts_direct_downloads_list }}"- ignore_errors: true+ loop: "{{ lookup('role_var', '_scripts_direct_downloads_list', role='nzbget') }}" - name: Post-Install | Scripts | Import local scripts ansible.builtin.copy: src: "{{ item }}"- dest: "{{ nzbget_scripts_paths_location }}/{{ item }}"+ dest: "{{ lookup('role_var', '_scripts_paths_location', role='nzbget') }}/{{ item }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775" force: true- loop: "{{ nzbget_scripts_local_copy_list }}"--# Customize Scripts+ loop: "{{ lookup('role_var', '_scripts_local_copy_list', role='nzbget') }}" - name: Post-Install | Scripts | Check if 'nzbgetpp/rarfile/rarfile.py' exists ansible.builtin.stat:- path: "{{ nzbget_scripts_paths_rarfile_py_location }}"+ path: "{{ lookup('role_var', '_scripts_paths_rarfile_py_location', role='nzbget') }}" register: nzbget_scripts_rarfile_py_stat - name: Post-Install | Scripts | Add 
unrar path to 'nzbgetpp/rarfile/rarfile.py' ansible.builtin.lineinfile:- path: "{{ nzbget_scripts_paths_rarfile_py_location }}"+ path: "{{ lookup('role_var', '_scripts_paths_rarfile_py_location', role='nzbget') }}" regexp: '^UNRAR_TOOL\s?=.*' line: 'UNRAR_TOOL = "/usr/bin/unrar"' state: present
modified
roles/nzbget/tasks/subtasks/post-install/settings/main.yml
@@ -9,23 +9,23 @@ --- - name: Post-Install | Settings | Update settings For New Installs ansible.builtin.lineinfile:- path: "{{ nzbget_paths_config_location }}"+ path: "{{ nzbget_role_paths_config_location }}" regexp: '{{ item.regexp }}' line: "{{ item.line }}" state: present owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ nzbget_config_new_installs_settings_list }}"+ loop: "{{ lookup('role_var', '_config_new_installs_settings_list', role='nzbget') }}" when: (not nzbget_paths_config_location_stat.stat.exists) - name: Post-Install | Settings | Set settings For Existing Installs ansible.builtin.lineinfile:- path: "{{ nzbget_paths_config_location }}"+ path: "{{ nzbget_role_paths_config_location }}" regexp: '{{ item.regexp }}' line: "{{ item.line }}" state: present owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ nzbget_config_existing_installs_settings_list }}"+ loop: "{{ lookup('role_var', '_config_existing_installs_settings_list', role='nzbget') }}"
modified
roles/nzbhydra2/defaults/main.yml
@@ -17,67 +17,67 @@ # Paths ################################ -nzbhydra2_paths_folder: "{{ nzbhydra2_name }}"-nzbhydra2_paths_location: "{{ server_appdata_path }}/{{ nzbhydra2_paths_folder }}"-nzbhydra2_paths_downloads_location: "{{ downloads_usenet_path }}/{{ nzbhydra2_paths_folder }}"-nzbhydra2_paths_folders_list:- - "{{ nzbhydra2_paths_location }}"-nzbhydra2_paths_config_location: "{{ nzbhydra2_paths_location }}/nzbhydra.yml"+nzbhydra2_role_paths_folder: "{{ nzbhydra2_name }}"+nzbhydra2_role_paths_location: "{{ server_appdata_path }}/{{ nzbhydra2_role_paths_folder }}"+nzbhydra2_role_paths_downloads_location: "{{ downloads_usenet_path }}/{{ nzbhydra2_role_paths_folder }}"+nzbhydra2_role_paths_folders_list:+ - "{{ nzbhydra2_role_paths_location }}"+nzbhydra2_role_paths_config_location: "{{ nzbhydra2_role_paths_location }}/nzbhydra.yml" ################################ # Web ################################ -nzbhydra2_web_subdomain: "{{ nzbhydra2_name }}"-nzbhydra2_web_domain: "{{ user.domain }}"-nzbhydra2_web_port: "5076"-nzbhydra2_web_url: "{{ 'https://' + (nzbhydra2_web_subdomain + '.' + nzbhydra2_web_domain- if (nzbhydra2_web_subdomain | length > 0)- else nzbhydra2_web_domain) }}"+nzbhydra2_role_web_subdomain: "{{ nzbhydra2_name }}"+nzbhydra2_role_web_domain: "{{ user.domain }}"+nzbhydra2_role_web_port: "5076"+nzbhydra2_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='nzbhydra2') + '.' 
+ lookup('role_var', '_web_domain', role='nzbhydra2')+ if (lookup('role_var', '_web_subdomain', role='nzbhydra2') | length > 0)+ else lookup('role_var', '_web_domain', role='nzbhydra2')) }}" ################################ # DNS ################################ -nzbhydra2_dns_record: "{{ nzbhydra2_web_subdomain }}"-nzbhydra2_dns_zone: "{{ nzbhydra2_web_domain }}"-nzbhydra2_dns_proxy: "{{ dns.proxied }}"+nzbhydra2_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='nzbhydra2') }}"+nzbhydra2_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='nzbhydra2') }}"+nzbhydra2_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -nzbhydra2_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-nzbhydra2_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', nzbhydra2_name + '_name', default=nzbhydra2_name)- if (nzbhydra2_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-nzbhydra2_traefik_middleware_custom: ""-nzbhydra2_traefik_certresolver: "{{ traefik_default_certresolver }}"-nzbhydra2_traefik_enabled: true-nzbhydra2_traefik_api_enabled: true-nzbhydra2_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/getnzb`) || PathPrefix(`/gettorrent`) || PathPrefix(`/rss`) || PathPrefix(`/torznab/api`)"+nzbhydra2_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+nzbhydra2_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + nzbhydra2_name+ if (lookup('role_var', '_themepark_enabled', role='nzbhydra2') and global_themepark_plugin_enabled)+ else '') }}"+nzbhydra2_role_traefik_middleware_custom: ""+nzbhydra2_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+nzbhydra2_role_traefik_enabled: true+nzbhydra2_role_traefik_api_enabled: true+nzbhydra2_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/getnzb`) || PathPrefix(`/gettorrent`) || PathPrefix(`/rss`) || 
PathPrefix(`/torznab/api`)" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-nzbhydra2_themepark_enabled: false-nzbhydra2_themepark_app: "nzbhydra2"-nzbhydra2_themepark_theme: "{{ global_themepark_theme }}"-nzbhydra2_themepark_domain: "{{ global_themepark_domain }}"-nzbhydra2_themepark_addons: []+nzbhydra2_role_themepark_enabled: false+nzbhydra2_role_themepark_app: "nzbhydra2"+nzbhydra2_role_themepark_theme: "{{ global_themepark_theme }}"+nzbhydra2_role_themepark_domain: "{{ global_themepark_domain }}"+nzbhydra2_role_themepark_addons: [] ################################ # Config ################################ -nzbhydra2_config_settings_jvm_memory: "{{ ((ansible_memory_mb.real.total / 1024)- | round(0, 'ceil') | int >= 8)- | ternary('512', '256') }}"+nzbhydra2_role_config_settings_jvm_memory: "{{ ((ansible_facts['memory_mb']['real']['total'] / 1024)+ | round(0, 'ceil') | int >= 8)+ | ternary('512', '256') }}" -nzbhydra2_config_settings_default:+nzbhydra2_role_config_settings_default: # NZBGet - del(.downloading.downloaders) - .downloading.downloaders[0].apiKey = "{{ nzbhydra2_sabnzbd_api_lookup | default('not-found') }}"@@ -88,107 +88,70 @@ - .downloading.downloaders[0].name = "SABnzbd" | .downloading.downloaders[0].name style="double" - .downloading.downloaders[0].nzbAddingType = "UPLOAD" | .downloading.downloaders[0].nzbAddingType style="double" - .downloading.downloaders[0].downloaderType = "SABNZBD" | .downloading.downloaders[0].downloaderType style="double"- - .downloading.downloaders[0].url = "http://{{ sabnzbd_docker_networks_alias }}:{{ sabnzbd_web_port }}" | .downloading.downloaders[0].url style="double"+ - .downloading.downloaders[0].url = "http://{{ lookup('role_var', '_docker_networks_alias', role='sabnzbd') }}:{{ lookup('role_var', '_web_port', role='sabnzbd') }}" | .downloading.downloaders[0].url style="double" - .downloading.downloaders[0].username 
= null - .downloading.downloaders[0].password = null - .downloading.downloaders[0].addPaused = false # JVM Memory. If RAM >= 8GB, set XMX to 512, else 256.- - .main.xmx = {{ nzbhydra2_config_settings_jvm_memory }}+ - .main.xmx = {{ lookup('role_var', '_config_settings_jvm_memory', role='nzbhydra2') }} -nzbhydra2_config_settings_custom: []+nzbhydra2_role_config_settings_custom: [] -nzbhydra2_config_settings_list: "{{ nzbhydra2_config_settings_default- + nzbhydra2_config_settings_custom }}"+nzbhydra2_role_config_settings_list: "{{ lookup('role_var', '_config_settings_default', role='nzbhydra2')+ + lookup('role_var', '_config_settings_custom', role='nzbhydra2') }}" ################################ # Docker ################################ # Container-nzbhydra2_docker_container: "{{ nzbhydra2_name }}"+nzbhydra2_role_docker_container: "{{ nzbhydra2_name }}" # Image-nzbhydra2_docker_image_pull: true-nzbhydra2_docker_image_tag: "latest"-nzbhydra2_docker_image: "lscr.io/linuxserver/nzbhydra2:{{ nzbhydra2_docker_image_tag }}"--# Ports-nzbhydra2_docker_ports_defaults: []-nzbhydra2_docker_ports_custom: []-nzbhydra2_docker_ports: "{{ nzbhydra2_docker_ports_defaults- + nzbhydra2_docker_ports_custom }}"+nzbhydra2_role_docker_image_pull: true+nzbhydra2_role_docker_image_repo: "lscr.io/linuxserver/nzbhydra2"+nzbhydra2_role_docker_image_tag: "latest"+nzbhydra2_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='nzbhydra2') }}:{{ lookup('role_var', '_docker_image_tag', role='nzbhydra2') }}" # Envs-nzbhydra2_docker_envs_default:+nzbhydra2_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-nzbhydra2_docker_envs_custom: {}-nzbhydra2_docker_envs: "{{ nzbhydra2_docker_envs_default- | combine(nzbhydra2_docker_envs_custom) }}"--# Commands-nzbhydra2_docker_commands_default: []-nzbhydra2_docker_commands_custom: []-nzbhydra2_docker_commands: "{{ nzbhydra2_docker_commands_default- + nzbhydra2_docker_commands_custom 
}}"+nzbhydra2_role_docker_envs_custom: {}+nzbhydra2_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='nzbhydra2')+ | combine(lookup('role_var', '_docker_envs_custom', role='nzbhydra2')) }}" # Volumes-nzbhydra2_docker_volumes_default:- - "{{ nzbhydra2_paths_location }}:/config"-nzbhydra2_docker_volumes_custom: []-nzbhydra2_docker_volumes: "{{ nzbhydra2_docker_volumes_default- + nzbhydra2_docker_volumes_custom }}"--# Devices-nzbhydra2_docker_devices_default: []-nzbhydra2_docker_devices_custom: []-nzbhydra2_docker_devices: "{{ nzbhydra2_docker_devices_default- + nzbhydra2_docker_devices_custom }}"--# Hosts-nzbhydra2_docker_hosts_default: {}-nzbhydra2_docker_hosts_custom: {}-nzbhydra2_docker_hosts: "{{ docker_hosts_common- | combine(nzbhydra2_docker_hosts_default)- | combine(nzbhydra2_docker_hosts_custom) }}"+nzbhydra2_role_docker_volumes_default:+ - "{{ nzbhydra2_role_paths_location }}:/config"+nzbhydra2_role_docker_volumes_custom: []+nzbhydra2_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='nzbhydra2')+ + lookup('role_var', '_docker_volumes_custom', role='nzbhydra2') }}" # Labels-nzbhydra2_docker_labels_default: {}-nzbhydra2_docker_labels_custom: {}-nzbhydra2_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', nzbhydra2_name + '_docker_labels_default', default=nzbhydra2_docker_labels_default))- | combine((traefik_themepark_labels- if (nzbhydra2_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', nzbhydra2_name + '_docker_labels_custom', default=nzbhydra2_docker_labels_custom)) }}"+nzbhydra2_role_docker_labels_default: {}+nzbhydra2_role_docker_labels_custom: {}+nzbhydra2_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='nzbhydra2')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='nzbhydra2') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='nzbhydra2')) }}" # 
Hostname-nzbhydra2_docker_hostname: "{{ nzbhydra2_name }}"+nzbhydra2_role_docker_hostname: "{{ nzbhydra2_name }}" # Networks-nzbhydra2_docker_networks_alias: "{{ nzbhydra2_name }}"-nzbhydra2_docker_networks_default: []-nzbhydra2_docker_networks_custom: []-nzbhydra2_docker_networks: "{{ docker_networks_common- + nzbhydra2_docker_networks_default- + nzbhydra2_docker_networks_custom }}"--# Capabilities-nzbhydra2_docker_capabilities_default: []-nzbhydra2_docker_capabilities_custom: []-nzbhydra2_docker_capabilities: "{{ nzbhydra2_docker_capabilities_default- + nzbhydra2_docker_capabilities_custom }}"--# Security Opts-nzbhydra2_docker_security_opts_default: []-nzbhydra2_docker_security_opts_custom: []-nzbhydra2_docker_security_opts: "{{ nzbhydra2_docker_security_opts_default- + nzbhydra2_docker_security_opts_custom }}"+nzbhydra2_role_docker_networks_alias: "{{ nzbhydra2_name }}"+nzbhydra2_role_docker_networks_default: []+nzbhydra2_role_docker_networks_custom: []+nzbhydra2_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='nzbhydra2')+ + lookup('role_var', '_docker_networks_custom', role='nzbhydra2') }}" # Restart Policy-nzbhydra2_docker_restart_policy: unless-stopped+nzbhydra2_role_docker_restart_policy: unless-stopped # State-nzbhydra2_docker_state: started+nzbhydra2_role_docker_state: started
modified
roles/nzbhydra2/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -22,12 +22,12 @@ - name: Check if existing config exists ansible.builtin.stat:- path: "{{ nzbhydra2_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='nzbhydra2') }}" register: nzbhydra2_yml_stat - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml" when: (not continuous_integration)
modified
roles/nzbhydra2/tasks/subtasks/post-install/main.yml
@@ -9,13 +9,13 @@ --- - name: Post-Install | Check if SABnzbd config exists ansible.builtin.stat:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='sabnzbd') }}" register: nzbhydra2_sabnzbd_yml_stat - name: Post-Install | SABnzbd tasks- ansible.builtin.import_tasks: "sabnzbd.yml"+ ansible.builtin.include_tasks: "sabnzbd.yml" when: (not nzbhydra2_yml_stat.stat.exists) and nzbhydra2_sabnzbd_yml_stat.stat.exists - name: Post-Install | Settings Tweaks- ansible.builtin.import_tasks: "settings.yml"+ ansible.builtin.include_tasks: "settings.yml" when: (not nzbhydra2_yml_stat.stat.exists)
modified
roles/nzbhydra2/tasks/subtasks/post-install/sabnzbd.yml
@@ -9,4 +9,4 @@ --- - name: Find api_key value ansible.builtin.set_fact:- nzbhydra2_sabnzbd_api_lookup: "{{ lookup('file', sabnzbd_paths_config_location) | regex_search('^api_key *= *.*', multiline=True) | regex_replace('.*= *(.*)$', '\\1') }}"+ nzbhydra2_sabnzbd_api_lookup: "{{ lookup('file', lookup('role_var', '_paths_config_location', role='sabnzbd')) | regex_search('^api_key *= *.*', multiline=True) | regex_replace('.*= *(.*)$', '\\1') }}"
modified
roles/nzbhydra2/tasks/subtasks/post-install/settings.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Settings | Wait for config file to be created ansible.builtin.wait_for:- path: "{{ nzbhydra2_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='nzbhydra2') }}" state: present - name: Wait 30 seconds@@ -21,10 +21,10 @@ - name: Post-Install | Settings | Update settings ansible.builtin.shell: |- yyq -i '{{ item }}' {{ nzbhydra2_paths_config_location }}+ yyq -i '{{ item }}' {{ lookup('role_var', '_paths_config_location', role='nzbhydra2') }} become: true become_user: "{{ user.name }}"- loop: "{{ nzbhydra2_config_settings_list }}"+ loop: "{{ lookup('role_var', '_config_settings_list', role='nzbhydra2') }}" - name: Post-Install | Settings | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/nzbthrottle/defaults/main.yml
@@ -17,95 +17,52 @@ # Paths ################################ -nzbthrottle_paths_folder: "{{ nzbthrottle_name }}"-nzbthrottle_paths_location: "{{ server_appdata_path }}/{{ nzbthrottle_paths_folder }}"-nzbthrottle_paths_folders_list:- - "{{ nzbthrottle_paths_location }}"-nzbthrottle_paths_config_location: "{{ nzbthrottle_paths_location }}/config.json"+nzbthrottle_role_paths_folder: "{{ nzbthrottle_name }}"+nzbthrottle_role_paths_location: "{{ server_appdata_path }}/{{ nzbthrottle_role_paths_folder }}"+nzbthrottle_role_paths_folders_list:+ - "{{ nzbthrottle_role_paths_location }}"+nzbthrottle_role_paths_config_location: "{{ nzbthrottle_role_paths_location }}/config.json" ################################ # Docker ################################ # Container-nzbthrottle_docker_container: "{{ nzbthrottle_name }}"+nzbthrottle_role_docker_container: "{{ nzbthrottle_name }}" # Image-nzbthrottle_docker_image_pull: true-nzbthrottle_docker_image_tag: "latest"-nzbthrottle_docker_image: "daghaian/nzbthrottle:{{ nzbthrottle_docker_image_tag }}"--# Ports-nzbthrottle_docker_ports_defaults: []-nzbthrottle_docker_ports_custom: []-nzbthrottle_docker_ports: "{{ nzbthrottle_docker_ports_defaults- + nzbthrottle_docker_ports_custom }}"+nzbthrottle_role_docker_image_pull: true+nzbthrottle_role_docker_image_repo: "daghaian/nzbthrottle"+nzbthrottle_role_docker_image_tag: "latest"+nzbthrottle_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='nzbthrottle') }}:{{ lookup('role_var', '_docker_image_tag', role='nzbthrottle') }}" # Envs-nzbthrottle_docker_envs_default:+nzbthrottle_role_docker_envs_default: TZ: "{{ tz }}"-nzbthrottle_docker_envs_custom: {}-nzbthrottle_docker_envs: "{{ nzbthrottle_docker_envs_default- | combine(nzbthrottle_docker_envs_custom) }}"--# Commands-nzbthrottle_docker_commands_default: []-nzbthrottle_docker_commands_custom: []-nzbthrottle_docker_commands: "{{ nzbthrottle_docker_commands_default- + nzbthrottle_docker_commands_custom 
}}"+nzbthrottle_role_docker_envs_custom: {}+nzbthrottle_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='nzbthrottle')+ | combine(lookup('role_var', '_docker_envs_custom', role='nzbthrottle')) }}" # Volumes-nzbthrottle_docker_volumes_default:- - "{{ nzbthrottle_paths_config_location }}:/nzbthrottle/config.json:ro"-nzbthrottle_docker_volumes_custom: []-nzbthrottle_docker_volumes: "{{ nzbthrottle_docker_volumes_default- + nzbthrottle_docker_volumes_custom }}"--# Devices-nzbthrottle_docker_devices_default: []-nzbthrottle_docker_devices_custom: []-nzbthrottle_docker_devices: "{{ nzbthrottle_docker_devices_default- + nzbthrottle_docker_devices_custom }}"--# Hosts-nzbthrottle_docker_hosts_default: {}-nzbthrottle_docker_hosts_custom: {}-nzbthrottle_docker_hosts: "{{ docker_hosts_common- | combine(nzbthrottle_docker_hosts_default)- | combine(nzbthrottle_docker_hosts_custom) }}"--# Labels-nzbthrottle_docker_labels_default: {}-nzbthrottle_docker_labels_custom: {}-nzbthrottle_docker_labels: "{{ docker_labels_common- | combine(nzbthrottle_docker_labels_default)- | combine(nzbthrottle_docker_labels_custom) }}"+nzbthrottle_role_docker_volumes_default:+ - "{{ nzbthrottle_role_paths_config_location }}:/nzbthrottle/config.json:ro"+nzbthrottle_role_docker_volumes_custom: []+nzbthrottle_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='nzbthrottle')+ + lookup('role_var', '_docker_volumes_custom', role='nzbthrottle') }}" # Hostname-nzbthrottle_docker_hostname: "{{ nzbthrottle_name }}"+nzbthrottle_role_docker_hostname: "{{ nzbthrottle_name }}" # Networks-nzbthrottle_docker_networks_alias: "{{ nzbthrottle_name }}"-nzbthrottle_docker_networks_default: []-nzbthrottle_docker_networks_custom: []-nzbthrottle_docker_networks: "{{ docker_networks_common- + nzbthrottle_docker_networks_default- + nzbthrottle_docker_networks_custom }}"--# Capabilities-nzbthrottle_docker_capabilities_default: []-nzbthrottle_docker_capabilities_custom: 
[]-nzbthrottle_docker_capabilities: "{{ nzbthrottle_docker_capabilities_default- + nzbthrottle_docker_capabilities_custom }}"--# Security Opts-nzbthrottle_docker_security_opts_default: []-nzbthrottle_docker_security_opts_custom: []-nzbthrottle_docker_security_opts: "{{ nzbthrottle_docker_security_opts_default- + nzbthrottle_docker_security_opts_custom }}"+nzbthrottle_role_docker_networks_alias: "{{ nzbthrottle_name }}"+nzbthrottle_role_docker_networks_default: []+nzbthrottle_role_docker_networks_custom: []+nzbthrottle_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='nzbthrottle')+ + lookup('role_var', '_docker_networks_custom', role='nzbthrottle') }}" # Restart Policy-nzbthrottle_docker_restart_policy: unless-stopped+nzbthrottle_role_docker_restart_policy: unless-stopped # State-nzbthrottle_docker_state: started+nzbthrottle_role_docker_state: started
modified
roles/nzbthrottle/tasks/subtasks/pre-install.yml
@@ -13,11 +13,11 @@ - name: Pre-Install | Check if config exists ansible.builtin.stat:- path: "{{ nzbthrottle_paths_config_location }}"- register: nzbthrottle_paths_config_location_stat+ path: "{{ lookup('role_var', '_paths_config_location', role='nzbthrottle') }}"+ register: nzbthrottle_role_paths_config_location_stat - name: Pre-Install | New config tasks- when: (not nzbthrottle_paths_config_location_stat.stat.exists)+ when: (not nzbthrottle_role_paths_config_location_stat.stat.exists) block: - name: Pre-Install | Import Plex Auth Token role ansible.builtin.include_role:@@ -27,7 +27,7 @@ - name: Pre-Install | Import default config ansible.builtin.template: src: 'config.json.j2'- dest: "{{ nzbthrottle_paths_config_location }}"+ dest: "{{ lookup('role_var', '_paths_config_location', role='nzbthrottle') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/nzbthrottle/templates/config.json.j2
@@ -1,13 +1,13 @@ { "plex": {- "url": "{{ plex_web_url }}",+ "url": "{{ lookup('role_var', '_web_url', role='plex') }}", "interval": 60, "token": "{{ plex_auth_token | default('') }}" }, "nzbget": { "username": "{{ user.name }}", "password": "{{ user.pass }}",- "url": "http://nzbget:6789",+ "url": "http://{{ nzbget_name }}:6789", "speeds": { "1": 50000, "2": 40000,
modified
roles/organizr/defaults/main.yml
@@ -18,135 +18,90 @@ ################################ organizr_branch: "v2-master"-organizr_fpm: "yes" ################################ # Paths ################################ -organizr_paths_folder: "{{ organizr_name }}"-organizr_paths_location: "{{ server_appdata_path }}/{{ organizr_paths_folder }}"-organizr_paths_folders_list:- - "{{ organizr_paths_location }}"+organizr_role_paths_folder: "{{ organizr_name }}"+organizr_role_paths_location: "{{ server_appdata_path }}/{{ organizr_role_paths_folder }}"+organizr_role_paths_folders_list:+ - "{{ organizr_role_paths_location }}" ################################ # Web ################################ -organizr_web_subdomain: "{{ organizr_name }}"-organizr_web_domain: "{{ user.domain }}"-organizr_web_port: "80"-organizr_web_url: "{{ 'https://' + (organizr_web_subdomain + '.' + organizr_web_domain- if (organizr_web_subdomain | length > 0)- else organizr_web_domain) }}"+organizr_role_web_subdomain: "{{ organizr_name }}"+organizr_role_web_domain: "{{ user.domain }}"+organizr_role_web_port: "80"+organizr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='organizr') + '.' 
+ lookup('role_var', '_web_domain', role='organizr')+ if (lookup('role_var', '_web_subdomain', role='organizr') | length > 0)+ else lookup('role_var', '_web_domain', role='organizr')) }}" ################################ # DNS ################################ -organizr_dns_record: "{{ organizr_web_subdomain }}"-organizr_dns_zone: "{{ organizr_web_domain }}"-organizr_dns_proxy: "{{ dns.proxied }}"+organizr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='organizr') }}"+organizr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='organizr') }}"+organizr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -organizr_traefik_sso_middleware: ""-organizr_traefik_middleware_default: "{{ traefik_default_middleware }}"-organizr_traefik_middleware_custom: ""-organizr_traefik_certresolver: "{{ traefik_default_certresolver }}"-organizr_traefik_enabled: true-organizr_traefik_api_enabled: false-organizr_traefik_api_endpoint: ""+organizr_role_traefik_sso_middleware: ""+organizr_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+organizr_role_traefik_middleware_custom: ""+organizr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+organizr_role_traefik_enabled: true+organizr_role_traefik_api_enabled: false+organizr_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-organizr_docker_container: "{{ organizr_name }}"+organizr_role_docker_container: "{{ organizr_name }}" # Image-organizr_docker_image_pull: true-organizr_docker_image_tag: "latest"-organizr_docker_image: "organizr/organizr:{{ organizr_docker_image_tag }}"--# Ports-organizr_docker_ports_defaults: []-organizr_docker_ports_custom: []-organizr_docker_ports: "{{ organizr_docker_ports_defaults- + organizr_docker_ports_custom }}"+organizr_role_docker_image_pull: true+organizr_role_docker_image_repo: "organizr/organizr"+organizr_role_docker_image_tag: 
"latest"+organizr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='organizr') }}:{{ lookup('role_var', '_docker_image_tag', role='organizr') }}" # Envs-organizr_docker_envs_default:+organizr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"- fpm: "{{ organizr_fpm }}" branch: "{{ organizr_branch }}"-organizr_docker_envs_custom: {}-organizr_docker_envs: "{{ organizr_docker_envs_default- | combine(organizr_docker_envs_custom) }}"--# Commands-organizr_docker_commands_default: []-organizr_docker_commands_custom: []-organizr_docker_commands: "{{ organizr_docker_commands_default- + organizr_docker_commands_custom }}"+organizr_role_docker_envs_custom: {}+organizr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='organizr')+ | combine(lookup('role_var', '_docker_envs_custom', role='organizr')) }}" # Volumes-organizr_docker_volumes_default:- - "{{ organizr_paths_location }}:/config"-organizr_docker_volumes_custom: []-organizr_docker_volumes: "{{ organizr_docker_volumes_default- + organizr_docker_volumes_custom }}"--# Devices-organizr_docker_devices_default: []-organizr_docker_devices_custom: []-organizr_docker_devices: "{{ organizr_docker_devices_default- + organizr_docker_devices_custom }}"--# Hosts-organizr_docker_hosts_default: {}-organizr_docker_hosts_custom: {}-organizr_docker_hosts: "{{ docker_hosts_common- | combine(organizr_docker_hosts_default)- | combine(organizr_docker_hosts_custom) }}"--# Labels-organizr_docker_labels_default: {}-organizr_docker_labels_custom: {}-organizr_docker_labels: "{{ docker_labels_common- | combine(organizr_docker_labels_default)- | combine(organizr_docker_labels_custom) }}"+organizr_role_docker_volumes_default:+ - "{{ organizr_role_paths_location }}:/config"+organizr_role_docker_volumes_custom: []+organizr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='organizr')+ + lookup('role_var', '_docker_volumes_custom', role='organizr') }}" # 
Hostname-organizr_docker_hostname: "{{ organizr_name }}"+organizr_role_docker_hostname: "{{ organizr_name }}" # Networks-organizr_docker_networks_alias: "{{ organizr_name }}"-organizr_docker_networks_default: []-organizr_docker_networks_custom: []-organizr_docker_networks: "{{ docker_networks_common- + organizr_docker_networks_default- + organizr_docker_networks_custom }}"--# Capabilities-organizr_docker_capabilities_default: []-organizr_docker_capabilities_custom: []-organizr_docker_capabilities: "{{ organizr_docker_capabilities_default- + organizr_docker_capabilities_custom }}"--# Security Opts-organizr_docker_security_opts_default: []-organizr_docker_security_opts_custom: []-organizr_docker_security_opts: "{{ organizr_docker_security_opts_default- + organizr_docker_security_opts_custom }}"+organizr_role_docker_networks_alias: "{{ organizr_name }}"+organizr_role_docker_networks_default: []+organizr_role_docker_networks_custom: []+organizr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='organizr')+ + lookup('role_var', '_docker_networks_custom', role='organizr') }}" # Restart Policy-organizr_docker_restart_policy: unless-stopped+organizr_role_docker_restart_policy: unless-stopped # State-organizr_docker_state: started+organizr_role_docker_state: started
modified
roles/organizr/tasks/main.yml
@@ -11,9 +11,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/overseerr/defaults/main.yml
@@ -17,162 +17,119 @@ # Settings ################################ -overseerr_log_level: "info"+overseerr_role_log_level: "info" ################################ # Paths ################################ -overseerr_paths_folder: "{{ overseerr_name }}"-overseerr_paths_location: "{{ server_appdata_path }}/{{ overseerr_paths_folder }}"-overseerr_paths_cache: "{{ overseerr_paths_location }}/cache"-overseerr_paths_folders_list:- - "{{ overseerr_paths_location }}"- - "{{ overseerr_paths_cache }}"-overseerr_paths_config_location: "{{ overseerr_paths_location }}/settings.json"+overseerr_role_paths_folder: "{{ overseerr_name }}"+overseerr_role_paths_location: "{{ server_appdata_path }}/{{ overseerr_role_paths_folder }}"+overseerr_role_paths_cache: "{{ overseerr_role_paths_location }}/cache"+overseerr_role_paths_folders_list:+ - "{{ overseerr_role_paths_location }}"+ - "{{ overseerr_role_paths_cache }}"+overseerr_role_paths_config_location: "{{ overseerr_role_paths_location }}/settings.json" ################################ # Web ################################ -overseerr_web_subdomain: "{{ overseerr_name }}"-overseerr_web_domain: "{{ user.domain }}"-overseerr_web_port: "5055"-overseerr_web_url: "{{ 'https://' + (lookup('vars', overseerr_name + '_web_subdomain', default=overseerr_web_subdomain) + '.' + lookup('vars', overseerr_name + '_web_domain', default=overseerr_web_domain)- if (lookup('vars', overseerr_name + '_web_subdomain', default=overseerr_web_subdomain) | length > 0)- else lookup('vars', overseerr_name + '_web_domain', default=overseerr_web_domain)) }}"+overseerr_role_web_subdomain: "{{ overseerr_name }}"+overseerr_role_web_domain: "{{ user.domain }}"+overseerr_role_web_port: "5055"+overseerr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='overseerr') + '.' 
+ lookup('role_var', '_web_domain', role='overseerr')+ if (lookup('role_var', '_web_subdomain', role='overseerr') | length > 0)+ else lookup('role_var', '_web_domain', role='overseerr')) }}" ################################ # DNS ################################ -overseerr_dns_record: "{{ lookup('vars', overseerr_name + '_web_subdomain', default=overseerr_web_subdomain) }}"-overseerr_dns_zone: "{{ lookup('vars', overseerr_name + '_web_domain', default=overseerr_web_domain) }}"-overseerr_dns_proxy: "{{ dns.proxied }}"+overseerr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='overseerr') }}"+overseerr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='overseerr') }}"+overseerr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -overseerr_traefik_sso_middleware: ""-overseerr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', overseerr_name + '_name', default=overseerr_name)- if (overseerr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-overseerr_traefik_middleware_custom: ""-overseerr_traefik_certresolver: "{{ traefik_default_certresolver }}"-overseerr_traefik_enabled: true-overseerr_traefik_api_enabled: false-overseerr_traefik_api_endpoint: ""+overseerr_role_traefik_sso_middleware: ""+overseerr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + overseerr_name+ if (lookup('role_var', '_themepark_enabled', role='overseerr') and global_themepark_plugin_enabled)+ else '') }}"+overseerr_role_traefik_middleware_custom: ""+overseerr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+overseerr_role_traefik_enabled: true+overseerr_role_traefik_api_enabled: false+overseerr_role_traefik_api_endpoint: "" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-overseerr_themepark_enabled: 
false-overseerr_themepark_app: "overseerr"-overseerr_themepark_theme: "{{ global_themepark_theme }}"-overseerr_themepark_domain: "{{ global_themepark_domain }}"-overseerr_themepark_addons: []+overseerr_role_themepark_enabled: false+overseerr_role_themepark_app: "overseerr"+overseerr_role_themepark_theme: "{{ global_themepark_theme }}"+overseerr_role_themepark_domain: "{{ global_themepark_domain }}"+overseerr_role_themepark_addons: [] ################################ # Docker ################################ # Container-overseerr_docker_container: "{{ overseerr_name }}"+overseerr_role_docker_container: "{{ overseerr_name }}" # Image-overseerr_docker_image_pull: true-overseerr_docker_image_repo: "sctx/overseerr"-overseerr_docker_image_tag: "latest"-overseerr_docker_image: "{{ lookup('vars', overseerr_name + '_docker_image_repo', default=overseerr_docker_image_repo)- + ':' + lookup('vars', overseerr_name + '_docker_image_tag', default=overseerr_docker_image_tag) }}"--# Ports-overseerr_docker_ports_defaults: []-overseerr_docker_ports_custom: []-overseerr_docker_ports: "{{ lookup('vars', overseerr_name + '_docker_ports_defaults', default=overseerr_docker_ports_defaults)- + lookup('vars', overseerr_name + '_docker_ports_custom', default=overseerr_docker_ports_custom) }}"+overseerr_role_docker_image_pull: true+overseerr_role_docker_image_repo: "sctx/overseerr"+overseerr_role_docker_image_tag: "latest"+overseerr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='overseerr') }}:{{ lookup('role_var', '_docker_image_tag', role='overseerr') }}" # Envs-overseerr_docker_envs_default:+overseerr_role_docker_envs_default: UMASK: "002" TZ: "{{ tz }}"- LOG_LEVEL: "{{ overseerr_log_level }}"-overseerr_docker_envs_custom: {}-overseerr_docker_envs: "{{ lookup('vars', overseerr_name + '_docker_envs_default', default=overseerr_docker_envs_default)- | combine(lookup('vars', overseerr_name + '_docker_envs_custom', default=overseerr_docker_envs_custom)) }}"--# 
Commands-overseerr_docker_commands_default: []-overseerr_docker_commands_custom: []-overseerr_docker_commands: "{{ lookup('vars', overseerr_name + '_docker_commands_default', default=overseerr_docker_commands_default)- + lookup('vars', overseerr_name + '_docker_commands_custom', default=overseerr_docker_commands_custom) }}"+ LOG_LEVEL: "{{ lookup('role_var', '_log_level', role='overseerr') }}"+overseerr_role_docker_envs_custom: {}+overseerr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='overseerr')+ | combine(lookup('role_var', '_docker_envs_custom', role='overseerr')) }}" # Volumes-overseerr_docker_volumes_default:- - "{{ overseerr_paths_location }}:/app/config"-overseerr_docker_volumes_custom: []-overseerr_docker_volumes: "{{ lookup('vars', overseerr_name + '_docker_volumes_default', default=overseerr_docker_volumes_default)- + lookup('vars', overseerr_name + '_docker_volumes_custom', default=overseerr_docker_volumes_custom) }}"--# Devices-overseerr_docker_devices_default: []-overseerr_docker_devices_custom: []-overseerr_docker_devices: "{{ lookup('vars', overseerr_name + '_docker_devices_default', default=overseerr_docker_devices_default)- + lookup('vars', overseerr_name + '_docker_devices_custom', default=overseerr_docker_devices_custom) }}"--# Hosts-overseerr_docker_hosts_default: {}-overseerr_docker_hosts_custom: {}-overseerr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', overseerr_name + '_docker_hosts_default', default=overseerr_docker_hosts_default))- | combine(lookup('vars', overseerr_name + '_docker_hosts_custom', default=overseerr_docker_hosts_custom)) }}"+overseerr_role_docker_volumes_default:+ - "{{ overseerr_role_paths_location }}:/app/config"+overseerr_role_docker_volumes_custom: []+overseerr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='overseerr')+ + lookup('role_var', '_docker_volumes_custom', role='overseerr') }}" # Labels-overseerr_docker_labels_default: 
{}-overseerr_docker_labels_custom: {}-overseerr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', overseerr_name + '_docker_labels_default', default=overseerr_docker_labels_default))- | combine((traefik_themepark_labels- if (overseerr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', overseerr_name + '_docker_labels_custom', default=overseerr_docker_labels_custom)) }}"+overseerr_role_docker_labels_default: {}+overseerr_role_docker_labels_custom: {}+overseerr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='overseerr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='overseerr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='overseerr')) }}" # Hostname-overseerr_docker_hostname: "{{ overseerr_name }}"--# Network Mode-overseerr_docker_network_mode_default: "{{ docker_networks_name_common }}"-overseerr_docker_network_mode: "{{ lookup('vars', overseerr_name + '_docker_network_mode_default', default=overseerr_docker_network_mode_default) }}"+overseerr_role_docker_hostname: "{{ overseerr_name }}" # Networks-overseerr_docker_networks_alias: "{{ overseerr_name }}"-overseerr_docker_networks_default: []-overseerr_docker_networks_custom: []-overseerr_docker_networks: "{{ docker_networks_common- + lookup('vars', overseerr_name + '_docker_networks_default', default=overseerr_docker_networks_default)- + lookup('vars', overseerr_name + '_docker_networks_custom', default=overseerr_docker_networks_custom) }}"--# Capabilities-overseerr_docker_capabilities_default: []-overseerr_docker_capabilities_custom: []-overseerr_docker_capabilities: "{{ lookup('vars', overseerr_name + '_docker_capabilities_default', default=overseerr_docker_capabilities_default)- + lookup('vars', overseerr_name + '_docker_capabilities_custom', default=overseerr_docker_capabilities_custom) }}"--# Security Opts-overseerr_docker_security_opts_default: 
[]-overseerr_docker_security_opts_custom: []-overseerr_docker_security_opts: "{{ lookup('vars', overseerr_name + '_docker_security_opts_default', default=overseerr_docker_security_opts_default)- + lookup('vars', overseerr_name + '_docker_security_opts_custom', default=overseerr_docker_security_opts_custom) }}"+overseerr_role_docker_networks_alias: "{{ overseerr_name }}"+overseerr_role_docker_networks_default: []+overseerr_role_docker_networks_custom: []+overseerr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='overseerr')+ + lookup('role_var', '_docker_networks_custom', role='overseerr') }}" # Restart Policy-overseerr_docker_restart_policy: unless-stopped+overseerr_role_docker_restart_policy: unless-stopped # State-overseerr_docker_state: started+overseerr_role_docker_state: started # User-overseerr_docker_user: "{{ uid }}:{{ gid }}"+overseerr_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/overseerr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/permissions/tasks/main.yml
@@ -7,11 +7,11 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Permissions | Recursively set permissions for '/mnt/local'- ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} /mnt/local"+- name: Permissions | Recursively set permissions for '{{ server_local_folder_path }}'+ ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} {{ server_local_folder_path }}" -- name: Permissions | Recursively set permissions for '/opt'- ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} /opt"+- name: Permissions | Recursively set permissions for '{{ server_appdata_path }}'+ ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} {{ server_appdata_path }}" - name: Permissions | Recursively set permissions for '/home/{{ user.name }}' ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} /home/{{ user.name }}"
modified
roles/petio/defaults/main.yml
@@ -17,139 +17,96 @@ # Settings ################################ -petio_mongodb_version: "4.4"+petio_role_mongodb_version: "4.4" ################################ # Paths ################################ -petio_paths_folder: "{{ petio_name }}"-petio_paths_location: "{{ server_appdata_path }}/{{ petio_paths_folder }}"-petio_paths_folders_list:- - "{{ petio_paths_location }}"+petio_role_paths_folder: "{{ petio_name }}"+petio_role_paths_location: "{{ server_appdata_path }}/{{ petio_role_paths_folder }}"+petio_role_paths_folders_list:+ - "{{ petio_role_paths_location }}" ################################ # Web ################################ -petio_web_subdomain: "{{ petio_name }}"-petio_web_domain: "{{ user.domain }}"-petio_web_port: "7777"-petio_web_url: "{{ 'https://' + (petio_web_subdomain + '.' + petio_web_domain- if (petio_web_subdomain | length > 0)- else petio_web_domain) }}"+petio_role_web_subdomain: "{{ petio_name }}"+petio_role_web_domain: "{{ user.domain }}"+petio_role_web_port: "7777"+petio_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='petio') + '.' 
+ lookup('role_var', '_web_domain', role='petio')+ if (lookup('role_var', '_web_subdomain', role='petio') | length > 0)+ else lookup('role_var', '_web_domain', role='petio')) }}" ################################ # DNS ################################ -petio_dns_record: "{{ petio_web_subdomain }}"-petio_dns_zone: "{{ petio_web_domain }}"-petio_dns_proxy: "{{ dns.proxied }}"+petio_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='petio') }}"+petio_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='petio') }}"+petio_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -petio_traefik_sso_middleware: ""-petio_traefik_middleware_default: "{{ traefik_default_middleware }}"-petio_traefik_middleware_custom: ""-petio_traefik_certresolver: "{{ traefik_default_certresolver }}"-petio_traefik_enabled: true-petio_traefik_api_enabled: false-petio_traefik_api_endpoint: ""+petio_role_traefik_sso_middleware: ""+petio_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+petio_role_traefik_middleware_custom: ""+petio_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+petio_role_traefik_enabled: true+petio_role_traefik_api_enabled: false+petio_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-petio_docker_container: "{{ petio_name }}"+petio_role_docker_container: "{{ petio_name }}" # Image-petio_docker_image_pull: true-petio_docker_image_tag: "latest"-petio_docker_image: "ghcr.io/petio-team/petio:{{ petio_docker_image_tag }}"--# Ports-petio_docker_ports_defaults: []-petio_docker_ports_custom: []-petio_docker_ports: "{{ petio_docker_ports_defaults- + petio_docker_ports_custom }}"+petio_role_docker_image_pull: true+petio_role_docker_image_repo: "ghcr.io/petio-team/petio"+petio_role_docker_image_tag: "latest"+petio_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='petio') }}:{{ lookup('role_var', 
'_docker_image_tag', role='petio') }}" # Envs-petio_docker_envs_default:+petio_role_docker_envs_default: TZ: "{{ tz }}"-petio_docker_envs_custom: {}-petio_docker_envs: "{{ petio_docker_envs_default- | combine(petio_docker_envs_custom) }}"--# Commands-petio_docker_commands_default: []-petio_docker_commands_custom: []-petio_docker_commands: "{{ petio_docker_commands_default- + petio_docker_commands_custom }}"+petio_role_docker_envs_custom: {}+petio_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='petio')+ | combine(lookup('role_var', '_docker_envs_custom', role='petio')) }}" # Volumes-petio_docker_volumes_default:- - "{{ petio_paths_location }}:/app/api/config"-petio_docker_volumes_custom: []-petio_docker_volumes: "{{ petio_docker_volumes_default- + petio_docker_volumes_custom }}"--# Devices-petio_docker_devices_default: []-petio_docker_devices_custom: []-petio_docker_devices: "{{ petio_docker_devices_default- + petio_docker_devices_custom }}"--# Hosts-petio_docker_hosts_default: {}-petio_docker_hosts_custom: {}-petio_docker_hosts: "{{ docker_hosts_common- | combine(petio_docker_hosts_default)- | combine(petio_docker_hosts_custom) }}"--# Labels-petio_docker_labels_default: {}-petio_docker_labels_custom: {}-petio_docker_labels: "{{ docker_labels_common- | combine(petio_docker_labels_default)- | combine(petio_docker_labels_custom) }}"+petio_role_docker_volumes_default:+ - "{{ petio_role_paths_location }}:/app/api/config"+petio_role_docker_volumes_custom: []+petio_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='petio')+ + lookup('role_var', '_docker_volumes_custom', role='petio') }}" # Hostname-petio_docker_hostname: "{{ petio_name }}"+petio_role_docker_hostname: "{{ petio_name }}" # Networks-petio_docker_networks_alias: "{{ petio_name }}"-petio_docker_networks_default: []-petio_docker_networks_custom: []-petio_docker_networks: "{{ docker_networks_common- + petio_docker_networks_default- + petio_docker_networks_custom 
}}"--# Capabilities-petio_docker_capabilities_default: []-petio_docker_capabilities_custom: []-petio_docker_capabilities: "{{ petio_docker_capabilities_default- + petio_docker_capabilities_custom }}"--# Security Opts-petio_docker_security_opts_default: []-petio_docker_security_opts_custom: []-petio_docker_security_opts: "{{ petio_docker_security_opts_default- + petio_docker_security_opts_custom }}"+petio_role_docker_networks_alias: "{{ petio_name }}"+petio_role_docker_networks_default: []+petio_role_docker_networks_custom: []+petio_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='petio')+ + lookup('role_var', '_docker_networks_custom', role='petio') }}" # Restart Policy-petio_docker_restart_policy: unless-stopped+petio_role_docker_restart_policy: unless-stopped # State-petio_docker_state: started+petio_role_docker_state: started # User-petio_docker_user: "{{ uid }}:{{ gid }}"+petio_role_docker_user: "{{ uid }}:{{ gid }}" # Dependencies-petio_depends_on: "{{ petio_name }}-mongo"-petio_depends_on_delay: "0"-petio_depends_on_healthchecks: "false"+petio_role_depends_on: "{{ petio_name }}-mongo"+petio_role_depends_on_delay: "0"+petio_role_depends_on_healthchecks: "false"
modified
roles/petio/tasks/main.yml
@@ -13,16 +13,16 @@ vars: mongodb_instances: ["{{ petio_name }}-mongo"] mongodb_docker_env_db: "{{ petio_name }}"- mongodb_docker_image_tag: "{{ petio_mongodb_version }}"+ mongodb_docker_image_tag: "{{ lookup('role_var', '_mongodb_version', role='petio') }}" mongodb_paths_folder: "{{ petio_name }}"- mongodb_paths_location: "{{ server_appdata_path }}/{{ mongodb_paths_folder }}/mongodb"+ mongodb_paths_location: "{{ server_appdata_path }}/{{ mongodb_role_paths_folder }}/mongodb" - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/plex/defaults/main.yml
@@ -17,152 +17,138 @@ # Settings ################################ -plex_open_main_ports: false-plex_open_local_ports: false-plex_plugin_webtools: false-plex_plugin_sub_zero: false-plex_insecure: false-plex_lan_ip: ""+# Do not enable globally if deploying multiple instances+plex_role_open_main_ports: false+# Do not enable globally if deploying multiple instances+plex_role_open_local_ports: false+# Disables Traefik's HTTP to HTTPS redirect for Plex+# Allows older clients with certificate issues to connect insecurely+plex_role_insecure: false+# Adds the IP specified here to the advertised URLs Plex broadcasts to clients+# Useful to avoid traffic going through your WAN when hairpin NAT is not available+plex_role_lan_ip: ""+# Example value "http://gluetun:8888"+plex_role_auth_token_proxy: "" ################################ # Proxy ################################ -# For instances this works the same as usual plex2_auth_token_proxy for an instance named plex2.-plex_auth_token_proxy: ""-+# Do not edit or override using the inventory plex_proxy_dict:- http_proxy: "{{ plex_proxy_lookup if (plex_proxy_lookup | length > 0) else '' }}"- https_proxy: "{{ plex_proxy_lookup if (plex_proxy_lookup | length > 0) else '' }}"--# Do not edit or override using the inventory-plex_proxy_lookup: "{{ lookup('vars', plex_name + '_auth_token_proxy', default=plex_auth_token_proxy) }}"-plex_network_modes: []+ http_proxy: "{{ plex_role_proxy_lookup if (plex_role_proxy_lookup | length > 0) else '' }}"+ https_proxy: "{{ plex_role_proxy_lookup if (plex_role_proxy_lookup | length > 0) else '' }}"++# Do not edit or override using the inventory+plex_role_proxy_lookup: "{{ lookup('role_var', '_auth_token_proxy', role='plex') }}"+# Do not edit or override using the inventory+plex_role_network_modes: [] ################################ # Paths ################################ -plex_paths_folder: "{{ plex_name }}"-plex_paths_location: "{{ server_appdata_path }}/{{ plex_paths_folder 
}}"-plex_paths_transcodes_location: "{{ transcodes_path }}/{{ plex_paths_folder }}"-plex_paths_folders_list:- - "{{ plex_paths_location }}"- - "{{ plex_paths_location }}/Library"- - "{{ plex_paths_location }}/Library/Application Support"- - "{{ plex_paths_location }}/Library/Application Support/Plex Media Server"- - "{{ plex_paths_location }}/Library/Application Support/Plex Media Server/Plug-ins"- - "{{ plex_paths_location }}/Library/Logs"- - "{{ plex_paths_location }}/Library/Logs/Plex Media Server"- - "{{ plex_paths_transcodes_location }}"-plex_paths_application_support_location: "{{ plex_paths_location }}/Library/Application Support/Plex Media Server"-plex_paths_config_location: "{{ plex_paths_application_support_location }}/Preferences.xml"-plex_paths_log_location: "{{ plex_paths_application_support_location }}/Logs"-plex_paths_plugins_location: "{{ plex_paths_application_support_location }}/Plug-ins"-plex_paths_plugin_support_location: "{{ plex_paths_application_support_location }}/Plug-in Support"-plex_paths_db_location: "{{ plex_paths_plugin_support_location }}/Databases/com.plexapp.plugins.library.db"-plex_paths_db_blobs_location: "{{ plex_paths_plugin_support_location }}/Databases/com.plexapp.plugins.library.blobs.db"+plex_role_paths_folder: "{{ plex_name }}"+plex_role_paths_location: "{{ server_appdata_path }}/{{ plex_role_paths_folder }}"+plex_role_paths_transcodes_location: "{{ transcodes_path }}/{{ plex_role_paths_folder }}"+plex_role_paths_folders_list:+ - "{{ plex_role_paths_location }}"+ - "{{ plex_role_paths_location }}/Library"+ - "{{ plex_role_paths_location }}/Library/Application Support"+ - "{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server"+ - "{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server/Plug-ins"+ - "{{ plex_role_paths_location }}/Library/Logs"+ - "{{ plex_role_paths_location }}/Library/Logs/Plex Media Server"+ - "{{ plex_role_paths_transcodes_location 
}}"+plex_role_paths_application_support_location: "{{ plex_role_paths_location }}/Library/Application Support/Plex Media Server"+plex_role_paths_config_location: "{{ plex_role_paths_application_support_location }}/Preferences.xml"+plex_role_paths_log_location: "{{ plex_role_paths_application_support_location }}/Logs"+plex_role_paths_plugins_location: "{{ plex_role_paths_application_support_location }}/Plug-ins"+plex_role_paths_plugin_support_location: "{{ plex_role_paths_application_support_location }}/Plug-in Support"+plex_role_paths_db_location: "{{ plex_role_paths_plugin_support_location }}/Databases/com.plexapp.plugins.library.db"+plex_role_paths_db_blobs_location: "{{ plex_role_paths_plugin_support_location }}/Databases/com.plexapp.plugins.library.blobs.db" ################################ # Web ################################ -plex_web_subdomain: "{{ plex_name }}"-plex_web_domain: "{{ user.domain }}"-plex_web_port: "32400"-plex_web_scheme: "https"-plex_web_http_port: "32400"-plex_web_http_scheme: "http"-plex_web_url: "{{ 'https://' + (lookup('vars', plex_name + '_web_subdomain', default=plex_web_subdomain) + '.' + lookup('vars', plex_name + '_web_domain', default=plex_web_domain)- if (lookup('vars', plex_name + '_web_subdomain', default=plex_web_subdomain) | length > 0)- else lookup('vars', plex_name + '_web_domain', default=plex_web_domain)) }}"-plex_webtools_web_subdomain: "{{ plex_name }}-webtools"-plex_webtools_web_domain: "{{ plex_web_domain }}"-plex_webtools_web_port: "33400"-plex_webtools_host: "{{ lookup('vars', plex_name + '_webtools_web_subdomain', default=plex_webtools_web_subdomain)- + '.' + lookup('vars', plex_name + '_webtools_web_domain', default=plex_webtools_web_domain) }}"-plex_web_insecure_url: "{{ 'http://' + (lookup('vars', plex_name + '_web_subdomain', default=plex_web_subdomain) + '.' 
+ lookup('vars', plex_name + '_web_domain', default=plex_web_domain)- if (lookup('vars', plex_name + '_web_subdomain', default=plex_web_subdomain) | length > 0)- else lookup('vars', plex_name + '_web_domain', default=plex_web_domain)) }}"+plex_role_web_subdomain: "{{ plex_name }}"+plex_role_web_domain: "{{ user.domain }}"+plex_role_web_port: "32400"+# Do not edit or override using the inventory+plex_role_web_scheme: "https"+plex_role_web_http_port: "32400"+# Do not edit or override using the inventory+plex_role_web_http_scheme: "http"+plex_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='plex') + '.' + lookup('role_var', '_web_domain', role='plex')+ if (lookup('role_var', '_web_subdomain', role='plex') | length > 0)+ else lookup('role_var', '_web_domain', role='plex')) }}"+plex_role_web_insecure_url: "{{ 'http://' + (lookup('role_var', '_web_subdomain', role='plex') + '.' + lookup('role_var', '_web_domain', role='plex')+ if (lookup('role_var', '_web_subdomain', role='plex') | length > 0)+ else lookup('role_var', '_web_domain', role='plex')) }}" ################################ # DNS ################################ -plex_dns_record: "{{ lookup('vars', plex_name + '_web_subdomain', default=plex_web_subdomain) }}"-plex_dns_zone: "{{ lookup('vars', plex_name + '_web_domain', default=plex_web_domain) }}"-plex_dns_proxy: "{{ dns.proxied }}"-plex_webtools_dns_record: "{{ lookup('vars', plex_name + '_webtools_web_subdomain', default=plex_webtools_web_subdomain) }}"-plex_webtools_dns_zone: "{{ lookup('vars', plex_name + '_webtools_web_domain', default=plex_webtools_web_domain) }}"-plex_webtools_dns_proxy: "{{ dns.proxied }}"+plex_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='plex') }}"+plex_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='plex') }}"+plex_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -plex_traefik_sso_middleware: 
""-plex_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', plex_name + '_name', default=plex_name)- if (plex_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-plex_traefik_middleware_custom: ""-plex_traefik_certresolver: "{{ traefik_default_certresolver }}"-plex_traefik_enabled: true-plex_traefik_api_enabled: false-plex_traefik_api_endpoint: ""-plex_traefik_error_pages_enabled: false-plex_traefik_gzip_enabled: false--plex_traefik_middleware_http: "{{ 'globalHeaders@file'- if plex_insecure- else traefik_default_middleware_default_http }}"-plex_web_serverstransport: "skipverify@file"--plex_webtools_traefik_sso_middleware: ""-plex_webtools_traefik_middleware_default: "{{ traefik_default_middleware- + (',' + lookup('vars', plex_name + '_webtools_traefik_sso_middleware', default=plex_webtools_traefik_sso_middleware)- if (lookup('vars', plex_name + '_webtools_traefik_sso_middleware', default=plex_webtools_traefik_sso_middleware) | length > 0)- else '') }}"-plex_webtools_traefik_middleware_custom: ""-plex_webtools_traefik_middleware: "{{ plex_webtools_traefik_middleware_default- + (',' + plex_webtools_traefik_middleware_custom- if (not plex_webtools_traefik_middleware_custom.startswith(',') and plex_webtools_traefik_middleware_custom | length > 0)- else plex_webtools_traefik_middleware_custom) }}"-plex_webtools_traefik_certresolver: "{{ traefik_default_certresolver }}"-plex_webtools_traefik_router: "{{ lookup('vars', plex_name + '_webtools_web_subdomain', default=plex_webtools_web_subdomain) }}"--################################-# THEME-################################--# Options can be found at https://github.com/themepark-dev/theme.park-plex_themepark_enabled: false-plex_themepark_app: "plex"-plex_themepark_theme: "{{ global_themepark_theme }}"-plex_themepark_domain: "{{ global_themepark_domain }}"-plex_themepark_addons: []+plex_role_traefik_sso_middleware: ""+plex_role_traefik_middleware_default: "{{ 
traefik_default_middleware+ + (',themepark-' + plex_name+ if (lookup('role_var', '_themepark_enabled', role='plex') and global_themepark_plugin_enabled)+ else '') }}"+plex_role_traefik_middleware_custom: ""+plex_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+plex_role_traefik_enabled: true+plex_role_traefik_api_enabled: false+plex_role_traefik_api_endpoint: ""+plex_role_traefik_error_pages_enabled: false+plex_role_traefik_gzip_enabled: false+plex_role_traefik_middleware_http_insecure: "{{ lookup('role_var', '_insecure', role='plex') | bool }}"+plex_role_web_serverstransport: "skipverify@file"++################################+# Theme+################################++plex_role_themepark_enabled: false+# Do not edit or override using the inventory+plex_role_themepark_app: "plex"+# Options can be found at https://docs.theme-park.dev/themes/plex/+plex_role_themepark_theme: "{{ global_themepark_theme }}"+# Allows you to override the url where CSS files can be found+plex_role_themepark_domain: "{{ global_themepark_domain }}"+# Options can be found at https://docs.theme-park.dev/themes/addons/+plex_role_themepark_addons: [] ################################ # Docker ################################ # Container-plex_docker_container: "{{ plex_name }}"+plex_role_docker_container: "{{ plex_name }}" # Image-plex_docker_image_pull: true-plex_docker_image_repo: "plexinc/pms-docker"-plex_docker_image_tag: "latest"-plex_docker_image: "{{ lookup('vars', plex_name + '_docker_image_repo', default=plex_docker_image_repo)- + ':' + lookup('vars', plex_name + '_docker_image_tag', default=plex_docker_image_tag) }}"+plex_role_docker_image_pull: true+plex_role_docker_image_repo: "plexinc/pms-docker"+plex_role_docker_image_tag: "latest"+plex_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='plex') }}:{{ lookup('role_var', '_docker_image_tag', role='plex') }}" # Ports-plex_docker_ports_32400: "{{ port_lookup_32400.meta.port- if 
(port_lookup_32400.meta.port is defined) and (port_lookup_32400.meta.port | trim | length > 0)- else '32400' }}"--plex_docker_ports_defaults: []-plex_docker_ports_local:+plex_role_docker_ports_32400: "{{ port_lookup_32400.meta.port+ if (port_lookup_32400.meta.port is defined) and (port_lookup_32400.meta.port | trim | length > 0)+ else '32400' }}"++plex_role_docker_ports_default: []+# Skip docs+plex_role_docker_ports_local: - "1900:1900/udp" - "5353:5353/udp" - "8324:8324"@@ -170,135 +156,89 @@ - "32412:32412/udp" - "32414:32414/udp" - "32469:32469"-plex_docker_ports_ui:- - "{{ plex_docker_ports_32400 }}:{{ lookup('vars', plex_name + '_web_port', default=plex_web_port) }}"-plex_docker_ports_custom: []-plex_docker_ports: "{{ lookup('vars', plex_name + '_docker_ports_defaults', default=plex_docker_ports_defaults)- + lookup('vars', plex_name + '_docker_ports_custom', default=plex_docker_ports_custom)- + (plex_docker_ports_local- if (lookup('vars', plex_name + '_open_local_ports', default=false))- else [])- + (lookup('vars', plex_name + '_docker_ports_ui', default=plex_docker_ports_ui)- if plex_open_main_ports- else []) }}"+# Skip docs+plex_role_docker_ports_ui:+ - "{{ plex_role_docker_ports_32400 }}:{{ lookup('vars', plex_name + '_web_port', default=plex_role_web_port) }}"+plex_role_docker_ports_custom: []+plex_role_docker_ports: "{{ lookup('role_var', '_docker_ports_default', role='plex')+ + lookup('role_var', '_docker_ports_custom', role='plex')+ + (plex_role_docker_ports_local+ if (lookup('role_var', '_open_local_ports', role='plex'))+ else [])+ + (plex_role_docker_ports_ui+ if lookup('role_var', '_open_main_ports', role='plex')+ else []) }}" # Envs-plex_docker_envs_advertise_ip_url: "{{ lookup('vars', plex_name + '_web_url', default=plex_web_url) + ':443,' + lookup('vars', plex_name + '_web_insecure_url', default=plex_web_insecure_url) + ':80'- if plex_insecure- else lookup('vars', plex_name + '_web_url', default=plex_web_url) + ':443' 
}}"-plex_docker_envs_advertise_ip: "{{ 'http://' + plex_lan_ip + ':32400,' + lookup('vars', plex_name + '_docker_envs_advertise_ip_url', default=plex_docker_envs_advertise_ip_url)- if (plex_lan_ip | length > 0) and plex_open_main_ports- else lookup('vars', plex_name + '_docker_envs_advertise_ip_url', default=plex_docker_envs_advertise_ip_url) }}"-plex_docker_envs_default:+plex_role_docker_envs_advertise_ip_url: "{{ lookup('role_var', '_web_url', role='plex') + ':443,' + lookup('role_var', '_web_insecure_url', role='plex') + ':80'+ if lookup('role_var', '_insecure', role='plex')+ else lookup('role_var', '_web_url', role='plex') + ':443' }}"+plex_role_docker_envs_advertise_ip: "{{ 'http://' + lookup('role_var', '_lan_ip', role='plex') + ':32400,' + lookup('role_var', '_docker_envs_advertise_ip_url', role='plex')+ if (lookup('role_var', '_lan_ip', role='plex') | length > 0) and lookup('role_var', '_open_main_ports', role='plex')+ else lookup('role_var', '_docker_envs_advertise_ip_url', role='plex') }}"+plex_role_docker_envs_default: PLEX_UID: "{{ uid }}" PLEX_GID: "{{ gid }}" PLEX_CLAIM: "{{ (plex_claim_code) | default(omit) }}" CHANGE_CONFIG_DIR_OWNERSHIP: "false" TZ: "{{ tz }}"- ADVERTISE_IP: "{{ plex_docker_envs_advertise_ip }}"-plex_docker_envs_custom: {}-plex_docker_envs: "{{ lookup('vars', plex_name + '_docker_envs_default', default=plex_docker_envs_default)- | combine(lookup('vars', plex_name + '_docker_envs_custom', default=plex_docker_envs_custom)) }}"--# Commands-plex_docker_commands_default: []-plex_docker_commands_custom: []-plex_docker_commands: "{{ lookup('vars', plex_name + '_docker_commands_default', default=plex_docker_commands_default)- + lookup('vars', plex_name + '_docker_commands_custom', default=plex_docker_commands_custom) }}"+ ADVERTISE_IP: "{{ lookup('role_var', '_docker_envs_advertise_ip', role='plex') }}"+plex_role_docker_envs_custom: {}+plex_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='plex')+ | 
combine(lookup('role_var', '_docker_envs_custom', role='plex')) }}" # Volumes-plex_docker_volumes_default:- - "{{ plex_paths_location }}:/config"+plex_role_docker_volumes_default:+ - "{{ plex_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts" - "/dev/shm:/dev/shm"- - "{{ plex_paths_transcodes_location }}:/transcode"-plex_docker_volumes_legacy:+ - "{{ plex_role_paths_transcodes_location }}:/transcode"+plex_role_docker_volumes_legacy: - "/mnt/unionfs/Media:/data"-plex_docker_volumes_custom: []-plex_docker_volumes: "{{ lookup('vars', plex_name + '_docker_volumes_default', default=plex_docker_volumes_default)- + lookup('vars', plex_name + '_docker_volumes_custom', default=plex_docker_volumes_custom)- + (lookup('vars', plex_name + '_docker_volumes_legacy', default=plex_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"+plex_role_docker_volumes_custom: []+plex_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='plex')+ + lookup('role_var', '_docker_volumes_custom', role='plex')+ + (lookup('role_var', '_docker_volumes_legacy', role='plex')+ if docker_legacy_volume+ else []) }}" # Mounts-plex_docker_mounts_default:+plex_role_docker_mounts_default: - target: /tmp type: tmpfs-plex_docker_mounts_custom: []-plex_docker_mounts: "{{ lookup('vars', plex_name + '_docker_mounts_default', default=plex_docker_mounts_default)- + lookup('vars', plex_name + '_docker_mounts_custom', default=plex_docker_mounts_custom) }}"--# Devices-plex_docker_devices_default: []-plex_docker_devices_custom: []-plex_docker_devices: "{{ lookup('vars', plex_name + '_docker_devices_default', default=plex_docker_devices_default)- + lookup('vars', plex_name + '_docker_devices_custom', default=plex_docker_devices_custom) }}"+plex_role_docker_mounts_custom: []+plex_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='plex')+ + lookup('role_var', '_docker_mounts_custom', role='plex') }}" # 
Hosts-plex_docker_hosts_default:+plex_role_docker_hosts_default: metric.plex.tv: "{{ ip_address_localhost }}" metrics.plex.tv: "{{ ip_address_localhost }}" analytics.plex.tv: "{{ ip_address_localhost }}"-plex_docker_hosts_custom: {}-plex_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', plex_name + '_docker_hosts_default', default=plex_docker_hosts_default))- | combine(lookup('vars', plex_name + '_docker_hosts_custom', default=plex_docker_hosts_custom)) }}"+plex_role_docker_hosts_custom: {}+plex_role_docker_hosts: "{{ lookup('role_var', '_docker_hosts_default', role='plex')+ | combine(lookup('role_var', '_docker_hosts_custom', role='plex')) }}" # Labels-plex_docker_labels_default:- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}-http.entrypoints": "web" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}-http.service": "{{ lookup("vars", plex_name + "_webtools_web_subdomain", default=plex_webtools_web_subdomain) }}" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}-http.rule": "Host(`{{ plex_webtools_host }}`)" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}-http.middlewares": "{{ traefik_default_middleware_http }}" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}-http.priority": "20" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.entrypoints": "websecure" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.service": "{{ lookup("vars", plex_name + "_webtools_web_subdomain", default=plex_webtools_web_subdomain) }}" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.rule": "Host(`{{ plex_webtools_host }}`)" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.tls.options": "securetls@file" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.tls.certresolver": "{{ plex_webtools_traefik_certresolver }}" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.middlewares": "{{ 
plex_webtools_traefik_middleware }}" }'- - '{ "traefik.http.routers.{{ plex_webtools_traefik_router }}.priority": "20" }'- - '{ "traefik.http.services.{{ plex_webtools_traefik_router }}.loadbalancer.server.port": "{{ plex_webtools_web_port }}" }'-plex_docker_labels_custom: {}-plex_docker_labels: "{{ docker_labels_common- | combine((lookup('vars', plex_name + '_docker_labels_default', default=plex_docker_labels_default)- if plex_plugin_webtools- else {}),- (traefik_themepark_labels- if (plex_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', plex_name + '_docker_labels_custom', default=plex_docker_labels_custom)) }}"+plex_role_docker_labels_custom: {}+plex_role_docker_labels: "{{ lookup('role_var', '_docker_labels_custom', role='plex')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='plex') and global_themepark_plugin_enabled)+ else {})) }}" # Hostname-plex_docker_hostname: "{{ plex_name }}"--# Network Mode-plex_docker_network_mode_default: "{{ docker_networks_name_common }}"-plex_docker_network_mode: "{{ lookup('vars', plex_name + '_docker_network_mode_default', default=plex_docker_network_mode_default) }}"+plex_role_docker_hostname: "{{ plex_name }}" # Networks-plex_docker_networks_alias: "{{ plex_name }}"-plex_docker_networks_default: []-plex_docker_networks_custom: []-plex_docker_networks: "{{ docker_networks_common- + lookup('vars', plex_name + '_docker_networks_default', default=plex_docker_networks_default)- + lookup('vars', plex_name + '_docker_networks_custom', default=plex_docker_networks_custom) }}"--# Capabilities-plex_docker_capabilities_default: []-plex_docker_capabilities_custom: []-plex_docker_capabilities: "{{ lookup('vars', plex_name + '_docker_capabilities_default', default=plex_docker_capabilities_default)- + lookup('vars', plex_name + '_docker_capabilities_custom', default=plex_docker_capabilities_custom) }}"--# Security Opts-plex_docker_security_opts_default: 
[]-plex_docker_security_opts_custom: []-plex_docker_security_opts: "{{ lookup('vars', plex_name + '_docker_security_opts_default', default=plex_docker_security_opts_default)- + lookup('vars', plex_name + '_docker_security_opts_custom', default=plex_docker_security_opts_custom) }}"+plex_role_docker_networks_alias: "{{ plex_name }}"+plex_role_docker_networks_default: []+plex_role_docker_networks_custom: []+plex_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='plex')+ + lookup('role_var', '_docker_networks_custom', role='plex') }}" # Restart Policy-plex_docker_restart_policy: unless-stopped+plex_role_docker_restart_policy: unless-stopped # State-plex_docker_state: started+plex_role_docker_state: started
modified
roles/plex/tasks/main.yml
@@ -7,19 +7,19 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Filter Plex Network Modes+- name: Filter Plex Network Modes # noqa jinja[invalid] ansible.builtin.set_fact:- plex_network_modes: "{{ plex_network_modes + [plex_lookup] }}"+ plex_role_network_modes: "{{ plex_role_network_modes + [plex_lookup] }}" vars: plex_name: "{{ item }}"- plex_lookup: "{{ lookup('vars', item + '_docker_network_mode_default', default=lookup('vars', item + '_docker_network_mode', default=plex_docker_network_mode)) }}"+ plex_lookup: "{{ lookup('role_var', '_docker_network_mode', role='plex') }}" loop: "{{ plex_instances }}" when: (plex_instances | length > 1) and ('container:' in plex_lookup) - name: Fail if non-unique entries are present ansible.builtin.fail: msg: "At least two of your Plex instances are using the same 'container:' network_mode and this will not work due to port collision."- when: (plex_instances | length > 1) and (plex_network_modes | length) != (plex_network_modes | unique | length)+ when: (plex_instances | length > 1) and (plex_role_network_modes | length) != (plex_role_network_modes | unique | length) - name: "Execute Plex roles" ansible.builtin.include_tasks: main2.yml
modified
roles/plex/tasks/main2.yml
@@ -7,20 +7,20 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Add DNS record- ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml"- vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+- name: Ensure that ports aren't opened when using multiple instances+ ansible.builtin.assert:+ that:+ - not plex_role_open_main_ports+ - not plex_role_open_local_ports+ fail_msg: "Port opening must be disabled when using multiple Plex instances"+ when: (plex_instances | length > 1) - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_webtools_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_webtools_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_webtools_dns_proxy') }}"- when: plex_plugin_webtools+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -33,8 +33,8 @@ path: "{{ item }}" state: absent with_items:- - "{{ plex_paths_location }}/98-themepark"- - "{{ plex_paths_location }}/99-modify-binary"+ - "{{ plex_role_paths_location }}/98-themepark"+ - "{{ plex_role_paths_location }}/99-modify-binary" - "{{ server_appdata_path }}/scripts/bbe" - name: Plex Reset Codecs tasks@@ -42,9 +42,9 @@ block: - name: Get contents of Plex Codecs Folder ansible.builtin.find:- paths: "{{ plex_paths_application_support_location }}/Codecs"+ paths: "{{ plex_role_paths_application_support_location }}/Codecs" file_type: directory- recurse: no+ recurse: false register: plex_codecs_removal - name: Delete Plex Codecs 
Folder@@ -54,16 +54,16 @@ with_items: "{{ plex_codecs_removal.files }}" - name: Pre-Install Tasks- ansible.builtin.import_tasks: "subtasks/pre-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/pre-install/main.yml" when: (not continuous_integration) - name: Preferences Tasks- ansible.builtin.import_tasks: "subtasks/preferences/preferences.yml"+ ansible.builtin.include_tasks: "subtasks/preferences/preferences.yml" when: (not continuous_integration) - name: Docker Devices Task ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/set_docker_devices_variable.yml"- when: gpu.intel or use_nvidia+ when: use_intel or use_nvidia - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"@@ -71,4 +71,4 @@ - name: "Execute Plex Extra Tasks role" ansible.builtin.include_role: name: "plex_extra_tasks"- when: (plex_plugin_sub_zero or plex_plugin_webtools) and (not continuous_integration)+ when: (not continuous_integration)
modified
roles/plex/tasks/subtasks/preferences/claim_server.yml
@@ -15,7 +15,7 @@ ansible.builtin.uri: url: https://plex.tv/api/claim/token.json method: GET- return_content: yes+ return_content: true body: X-Plex-Version: "{{ plex_auth_token_version }}" X-Plex-Product: "{{ plex_auth_token_product }}"
modified
roles/plex/tasks/subtasks/preferences/preferences.yml
@@ -7,32 +7,32 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: "Preferences | Check if '{{ plex_paths_config_location | basename }}' exists"+- name: "Preferences | Check if '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' exists" ansible.builtin.stat:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" register: preferences_xml -- name: "Preferences | '{{ plex_paths_config_location | basename }}' Tasks"+- name: "Preferences | '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' Tasks" when: preferences_xml.stat.exists block: - name: "Preferences | Remove existing PlexOnlineToken from Preferences.xml" ansible.builtin.replace:- path: "{{ plex_paths_config_location }}"+ path: "{{ plex_role_paths_config_location }}" regexp: ' PlexOnlineToken="[^"]*"' replace: '' when: ('plex-reclaim' in ansible_run_tags) - - name: Preferences | Get '{{ plex_paths_config_location | basename }}' XML data+ - name: Preferences | Get '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' XML data community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" xpath: /Preferences content: attribute register: preferences_xml_resp ignore_errors: true - - name: "Preferences | Remove '{{ plex_paths_config_location | basename }}' if malformed"+ - name: "Preferences | Remove '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' if malformed" ansible.builtin.file:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" state: absent when: (preferences_xml_resp is failed) @@ -44,7 +44,7 @@ - name: "Preferences | Fix 'TranscoderTempDirectory'" community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', 
'_paths_config_location', role='plex') }}" xpath: /Preferences attribute: TranscoderTempDirectory value: "/transcode"@@ -55,7 +55,7 @@ - name: "Preferences | Set 'secureConnections' to preferred" community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" xpath: /Preferences attribute: secureConnections value: "1"@@ -70,5 +70,5 @@ (preferences_xml_resp.matches[0].Preferences.PlexOnlineToken | trim | length > 0) }}" - name: Preferences | Claim {{ plex_name | title }} Server- ansible.builtin.import_tasks: "claim_server.yml"+ ansible.builtin.include_tasks: "claim_server.yml" when: (not preferences_xml.stat.exists) or (preferences_xml.stat.exists and (not plex_server_claimed))
modified
roles/plex_auth_token/defaults/main.yml
@@ -15,5 +15,11 @@ plex_auth_token_product: "Saltbox" plex_auth_token_platform: "Linux" plex_auth_token_platform_version: "1.0.0"-plex_auth_token_device: "Ubuntu {{ ansible_distribution_version }} - {{ ansible_kernel }}"+plex_auth_token_device: "Ubuntu {{ ansible_facts['distribution_version'] }} - {{ ansible_facts['kernel'] }}" plex_auth_token_device_name: "Saltbox"++################################+# Lookups+################################++plex_auth_token_ini_file: "{{ server_appdata_path }}/saltbox/plex.ini"
modified
roles/plex_auth_token/tasks/main.yml
@@ -13,7 +13,7 @@ - name: "Auth Token | Create directories" ansible.builtin.file:- path: "/opt/saltbox"+ path: "{{ server_appdata_path }}/saltbox" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -21,7 +21,7 @@ - name: "Auth Token | Check if PIN exists" ansible.builtin.stat:- path: "/opt/saltbox/plex.ini"+ path: "{{ plex_auth_token_ini_file }}" register: plex_ini - name: "Auth Token | plex_auth_client_identifier"@@ -29,7 +29,7 @@ block: - name: "Auth Token | Lookup plex_auth_client_identifier" ansible.builtin.set_fact:- plex_auth_client_identifier: "{{ lookup('ini', 'client_identifier section=' + plex_name + ' file=/opt/saltbox/plex.ini') }}"+ plex_auth_client_identifier: "{{ lookup('ini', 'client_identifier section=' + plex_name + ' file=' + plex_auth_token_ini_file) }}" plex_auth_client_identifier_missing: false rescue:@@ -45,7 +45,7 @@ - name: "Auth Token | Set plex_auth_token variable if previously saved" ansible.builtin.set_fact:- plex_auth_token: "{{ lookup('ini', 'token section=' + plex_name + ' file=/opt/saltbox/plex.ini') | regex_replace('\n', '') }}"+ plex_auth_token: "{{ lookup('ini', 'token section=' + plex_name + ' file=' + plex_auth_token_ini_file) | regex_replace('\n', '') }}" when: plex_ini.stat.exists and (not plex_auth_client_identifier_missing) - name: "Auth Token | Set plex_no_token status"@@ -56,15 +56,15 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/network_container_health_status.yml" vars: _var_prefix: "plex"- when: (plex_proxy_lookup | length > 0)+ when: (plex_role_proxy_lookup | length > 0) - name: "Auth Token | Check if Token is valid" ansible.builtin.uri: url: "https://plex.tv/api/v2/user" method: GET- return_content: yes+ return_content: true body:- X-Plex-Token: "{{ lookup('ini', 'token section=' + plex_name + ' file=/opt/saltbox/plex.ini') }}"+ X-Plex-Token: "{{ lookup('ini', 'token section=' + plex_name + ' file=' + plex_auth_token_ini_file) }}" X-Plex-Version: "{{ plex_auth_token_version 
}}" X-Plex-Product: "{{ plex_auth_token_product }}" X-Plex-Client-Identifier: "{{ plex_auth_client_identifier }}"@@ -87,7 +87,7 @@ ansible.builtin.uri: url: "https://plex.tv/api/v2/pins" method: POST- return_content: yes+ return_content: true body: strong: "true" X-Plex-Version: "{{ plex_auth_token_version }}"@@ -112,7 +112,7 @@ ansible.builtin.uri: url: "https://plex.tv/api/v2/pins/{{ plex_pin.json.id }}" method: GET- return_content: yes+ return_content: true body: X-Plex-Client-Identifier: "{{ plex_auth_client_identifier }}" body_format: form-urlencoded@@ -130,7 +130,7 @@ ansible.builtin.uri: url: "https://plex.tv/api/v2/user" method: GET- return_content: yes+ return_content: true body: X-Plex-Token: "{{ plex_auth_token }}" X-Plex-Version: "{{ plex_auth_token_version }}"@@ -161,6 +161,7 @@ token: "{{ plex_auth_token }}" owner: "{{ user.name }}" group: "{{ user.name }}"+ base_path: "{{ server_appdata_path }}" overwrite: true - name: "Auth Token | Display Plex Auth Token"@@ -169,7 +170,7 @@ - name: Change permissions ansible.builtin.file:- path: /opt/saltbox+ path: "{{ server_appdata_path }}/saltbox" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0755"
modified
roles/plex_db/defaults/main.yml
@@ -7,10 +7,11 @@ # GNU General Public License v3.0 # ########################################################################## ---+plex_db_integrity_check_only: false+plex_db_failed_integrity: false+plex_db_failed_optimization: false++# Do not enable globally if deploying multiple instances plex_db_files: - "com.plexapp.plugins.library.db" - "com.plexapp.plugins.library.blobs.db"--plex_db_integrity_check_only: false-plex_db_failed_integrity: false-plex_db_failed_optimization: false
modified
roles/plex_db/tasks/main.yml
@@ -7,19 +7,19 @@ # GNU General Public License v3.0 # ########################################################################## ----- name: Delete old '/opt/plexsql'+- name: "Delete old '{{ server_appdata_path }}/plexsql'" ansible.builtin.file:- path: "/opt/plexsql"+ path: "{{ server_appdata_path }}/plexsql" state: absent - name: Copy Plex Binaries out- ansible.builtin.shell: "docker cp {{ plex_instances[0] }}:/usr/lib/plexmediaserver/ /opt/plexsql"+ ansible.builtin.shell: "docker cp {{ plex_instances[0] }}:/usr/lib/plexmediaserver/ {{ server_appdata_path }}/plexsql" -- name: Set '/opt/plexsql' ownership recursively+- name: Set '{{ server_appdata_path }}/plexsql' ownership recursively ansible.builtin.file:- path: "/opt/plexsql"+ path: "{{ server_appdata_path }}/plexsql" state: directory- recurse: yes+ recurse: true owner: "{{ user.name }}" group: "{{ user.name }}"
modified
roles/plex_db/tasks/main2.yml
@@ -7,14 +7,14 @@ # GNU General Public License v3.0 # ########################################################################## ----- name: "Check if '{{ plex_paths_db_location | basename }}' exists"+- name: "Check if '{{ lookup('role_var', '_paths_db_location', role='plex') | basename }}' exists" ansible.builtin.stat:- path: "{{ plex_paths_db_location }}"+ path: "{{ lookup('role_var', '_paths_db_location', role='plex') }}" register: plex_db -- name: "Check if '{{ plex_paths_db_blobs_location | basename }}' exists"+- name: "Check if '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') | basename }}' exists" ansible.builtin.stat:- path: "{{ plex_paths_db_blobs_location }}"+ path: "{{ lookup('role_var', '_paths_db_blobs_location', role='plex') }}" register: plex_db_blobs - name: Fail if database does not exist@@ -30,12 +30,12 @@ - name: Integrity check block: - name: Check if main database passes integrity_check- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' '{{ plex_paths_db_location }}' 'PRAGMA integrity_check(1)'"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_location', role='plex') }}' 'PRAGMA integrity_check(1)'" register: plex_db_integrity_check failed_when: (plex_db_integrity_check.stdout != 'ok') - name: Check if blobs database passes integrity_check- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' '{{ plex_paths_db_blobs_location }}' 'PRAGMA integrity_check(1)'"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') }}' 'PRAGMA integrity_check(1)'" register: plex_db_blobs_integrity_check failed_when: (plex_db_blobs_integrity_check.stdout != 'ok') @@ -44,7 +44,7 @@ ansible.builtin.set_fact: plex_db_failed_integrity: true - - name: "Notify | Plex instance '{{ plex_name }}' failed the integrity check."+ - name: "Plex instance '{{ plex_name }}' failed the integrity check." 
ansible.builtin.include_role: name: notify vars:@@ -62,26 +62,26 @@ mode: "0775" - name: Backup databases- ansible.builtin.shell: "cp '{{ plex_paths_plugin_support_location }}/Databases/{{ item }}' /tmp/{{ plex_name }}_backup/"+ ansible.builtin.shell: "cp '{{ lookup('role_var', '_paths_plugin_support_location', role='plex') }}/Databases/{{ item }}' /tmp/{{ plex_name }}_backup/" loop: "{{ plex_db_files }}" - - name: "Vacuum '{{ plex_paths_db_location | basename }}' database"- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' '{{ plex_paths_db_location }}' 'VACUUM;'"+ - name: "Vacuum '{{ lookup('role_var', '_paths_db_location', role='plex') | basename }}' database"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_location', role='plex') }}' 'VACUUM;'" register: plex_db_vacuum failed_when: (plex_db_vacuum.rc != 0) - - name: "Vacuum '{{ plex_paths_db_blobs_location | basename }}' database"- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' '{{ plex_paths_db_blobs_location }}' 'VACUUM;'"+ - name: "Vacuum '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') | basename }}' database"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') }}' 'VACUUM;'" register: plex_db_blobs_vacuum failed_when: (plex_db_blobs_vacuum.rc != 0) - - name: "Reindex '{{ plex_paths_db_location | basename }}' database"- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' '{{ plex_paths_db_location }}' 'REINDEX;'"+ - name: "Reindex '{{ lookup('role_var', '_paths_db_location', role='plex') | basename }}' database"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_location', role='plex') }}' 'REINDEX;'" register: plex_db_reindex failed_when: (plex_db_reindex.rc != 0) - - name: "Reindex '{{ plex_paths_db_blobs_location | basename }}' database"- ansible.builtin.shell: "'/opt/plexsql/Plex SQLite' 
'{{ plex_paths_db_blobs_location }}' 'REINDEX;'"+ - name: "Reindex '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') | basename }}' database"+ ansible.builtin.shell: "'{{ server_appdata_path }}/plexsql/Plex SQLite' '{{ lookup('role_var', '_paths_db_blobs_location', role='plex') }}' 'REINDEX;'" register: plex_db_blobs_reindex failed_when: (plex_db_blobs_reindex.rc != 0) @@ -90,7 +90,7 @@ ansible.builtin.set_fact: plex_db_failed_optimization: true - - name: "Notify | Plex instance '{{ plex_name }}' failed the optimization tasks."+ - name: "Plex instance '{{ plex_name }}' failed the optimization tasks." ansible.builtin.include_role: name: notify vars:@@ -98,7 +98,7 @@ - name: Delete wal and shm files ansible.builtin.file:- path: "{{ plex_paths_plugin_support_location }}/Databases/{{ item }}"+ path: "{{ lookup('role_var', '_paths_plugin_support_location', role='plex') }}/Databases/{{ item }}" state: absent loop: - "com.plexapp.plugins.library.db-wal"@@ -107,7 +107,7 @@ - "com.plexapp.plugins.library.blobs.db-shm" - name: Restore database backup- ansible.builtin.shell: "cp -f '/tmp/{{ plex_name }}_backup/{{ item }}' '{{ plex_paths_plugin_support_location }}/Databases/{{ item }}'"+ ansible.builtin.shell: "cp -f '/tmp/{{ plex_name }}_backup/{{ item }}' '{{ lookup('role_var', '_paths_plugin_support_location', role='plex') }}/Databases/{{ item }}'" loop: "{{ plex_db_files }}" - name: Start Docker container
modified
roles/plex_extra_tasks/defaults/main.yml
@@ -8,5 +8,5 @@ ######################################################################### --- plex_extra_tasks_hostname: "{{ plex_name- if not ('container:' in lookup('vars', plex_name + '_docker_network_mode_default', default=docker_networks_name_common))- else (lookup('vars', plex_name + '_docker_network_mode_default', default=docker_networks_name_common).split(':')[1]) }}"+ if not ('container:' in lookup('role_var', '_docker_network_mode', role='plex', default=docker_networks_name_common))+ else (lookup('role_var', '_docker_network_mode', role='plex', default=docker_networks_name_common).split(':')[1]) }}"
modified
roles/plex_extra_tasks/tasks/main.yml
@@ -17,5 +17,5 @@ docker_installed_containers_list: "{{ (docker_installed_containers_list.stdout).split() }}" - name: "Continue when Plex Docker container exists"- ansible.builtin.import_tasks: "main2.yml"- when: (plex_docker_container in docker_installed_containers_list)+ ansible.builtin.include_tasks: "main2.yml"+ when: (lookup('role_var', '_docker_container', role='plex') in docker_installed_containers_list)
modified
roles/plex_extra_tasks/tasks/main2.yml
@@ -7,18 +7,18 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Wait for '{{ plex_paths_config_location | basename }}' to be created+- name: Wait for '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' to be created ansible.builtin.wait_for:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" state: present - name: Wait for {{ plex_name | title }} DB to be created ansible.builtin.wait_for:- path: "{{ plex_paths_db_location }}"+ path: "{{ lookup('role_var', '_paths_db_location', role='plex') }}" state: present - name: Wait for {{ plex_name | title }} executable to be created- ansible.builtin.shell: docker exec {{ plex_docker_container }} bash -c "ls '/usr/lib/plexmediaserver/Plex Media Server'"+ ansible.builtin.shell: docker exec {{ lookup('role_var', '_docker_container', role='plex') }} bash -c "ls '/usr/lib/plexmediaserver/Plex Media Server'" register: pms_check until: pms_check.stderr.find("No such file or directory") == -1 retries: 600@@ -42,17 +42,17 @@ - name: Ensure transcodes folder has the correct permissions ansible.builtin.file:- path: "{{ plex_paths_transcodes_location }}"+ path: "{{ lookup('role_var', '_paths_transcodes_location', role='plex') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775" recurse: true -- name: Find external port binding for '{{ plex_docker_container }}' docker port '{{ plex_web_port }}'- when: plex_open_main_ports+- name: Find external port binding for '{{ lookup('role_var', '_docker_container', role='plex') }}' docker port '{{ lookup('role_var', '_web_port', role='plex') }}'+ when: lookup('role_var', '_open_main_ports', role='plex') block:- - name: Lookup host port mapped to '{{ plex_docker_container }}' docker port '{{ plex_web_port }}'- ansible.builtin.shell: docker port {{ plex_docker_container }} {{ plex_web_port }} 2> /dev/null | sed 
's/[0-9.]*://'+ - name: Lookup host port mapped to '{{ lookup('role_var', '_docker_container', role='plex') }}' docker port '{{ lookup('role_var', '_web_port', role='plex') }}'+ ansible.builtin.shell: docker port {{ lookup('role_var', '_docker_container', role='plex') }} {{ lookup('role_var', '_web_port', role='plex') }} 2> /dev/null | sed 's/[0-9.]*://' register: plex_docker_port_lookup_cmd changed_when: false @@ -60,21 +60,21 @@ ansible.builtin.set_fact: plex_docker_port_lookup: "{{ (plex_docker_port_lookup_cmd.stdout | trim) if (plex_docker_port_lookup_cmd.stdout | trim | length > 0)- else plex_web_port }}"+ else lookup('role_var', '_web_port', role='plex') }}" -- name: Update port in '{{ plex_paths_config_location | basename }}'+- name: Update port in '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" xpath: /Preferences attribute: ManualPortMappingPort- value: "{{ plex_docker_port_lookup | default(plex_web_port) }}"+ value: "{{ plex_docker_port_lookup | default(lookup('role_var', '_web_port', role='plex')) }}" state: present become: true become_user: "{{ user.name }}" -- name: Disable Remote Access in '{{ plex_paths_config_location | basename }}'+- name: Disable Remote Access in '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" xpath: /Preferences attribute: PublishServerOnPlexOnlineKey value: "0"@@ -82,9 +82,9 @@ become: true become_user: "{{ user.name }}" -- name: Disable Relay in '{{ plex_paths_config_location | basename }}'+- name: Disable Relay in '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' community.general.xml:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', 
'_paths_config_location', role='plex') }}" xpath: /Preferences attribute: RelayEnabled value: "0"@@ -92,22 +92,12 @@ become: true become_user: "{{ user.name }}" -- name: Reset permissions of '{{ plex_paths_config_location | basename }}'+- name: Reset permissions of '{{ lookup('role_var', '_paths_config_location', role='plex') | basename }}' ansible.builtin.file:- path: "{{ plex_paths_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='plex') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0644"--- name: "Install Sub-Zero Plugin"- ansible.builtin.include_role:- name: sub_zero- when: plex_plugin_sub_zero--- name: "Install WebTools Plugin"- ansible.builtin.include_role:- name: webtools- when: plex_plugin_webtools - name: Start Docker Container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/plex_fix_futures/tasks/main2.yml
@@ -15,6 +15,7 @@ token: "" owner: "{{ user.name }}" group: "{{ user.name }}"+ base_path: "{{ server_appdata_path }}" register: plex_instance_token - name: Plex API Block@@ -22,7 +23,7 @@ block: - name: Get items that have a date set in the future ansible.builtin.shell: |- curl -s '{{ plex_web_url }}/library/all?X-Plex-Token={{ plex_instance_token.facts.token }}' -H 'Accept: application/json' | jq -c --arg now '{{ ansible_date_time.epoch }}' '.MediaContainer.Metadata[] | select(.addedAt > ($now|tonumber)) | {id: .ratingKey, addedAt: .addedAt, updatedAt: .updatedAt, title: .title, librarySectionID: .librarySectionID, type: .type}'+ curl -s '{{ lookup('role_var', '_web_url', role='plex') }}/library/all?X-Plex-Token={{ plex_instance_token.facts.token }}' -H 'Accept: application/json' | jq -c --arg now '{{ ansible_facts['date_time']['epoch'] }}' '.MediaContainer.Metadata[] | select(.addedAt > ($now|tonumber)) | {id: .ratingKey, addedAt: .addedAt, updatedAt: .updatedAt, title: .title, librarySectionID: .librarySectionID, type: .type}' register: plex_future_items_request - name: Convert request output into a list of dictionaries@@ -36,7 +37,7 @@ - name: Fix future dates ansible.builtin.shell: |- curl -X PUT '{{ plex_web_url }}/library/sections/{{ item.librarySectionID }}/all?type={{ item.type_id }}&id={{ item.id }}&addedAt.value={{ item.updatedAt }}&X-Plex-Token={{ plex_instance_token.facts.token }}'+ curl -X PUT '{{ lookup('role_var', '_web_url', role='plex') }}/library/sections/{{ item.librarySectionID }}/all?type={{ item.type_id }}&id={{ item.id }}&addedAt.value={{ item.updatedAt }}&X-Plex-Token={{ plex_instance_token.facts.token }}' loop: "{{ plex_future_items_typed }}" register: plex_future_items_task when: plex_future_items_typed is defined
modified
roles/portainer/defaults/main.yml
@@ -17,151 +17,109 @@ # Settings ################################ -portainer_business_edition: false+portainer_role_business_edition: false ################################ # Paths ################################ -portainer_paths_folder: "{{ portainer_name }}"-portainer_paths_location: "{{ server_appdata_path }}/{{ portainer_paths_folder }}"-portainer_paths_folders_list:- - "{{ portainer_paths_location }}"+portainer_role_paths_folder: "{{ portainer_name }}"+portainer_role_paths_location: "{{ server_appdata_path }}/{{ portainer_role_paths_folder }}"+portainer_role_paths_folders_list:+ - "{{ portainer_role_paths_location }}" ################################ # Web ################################ -portainer_web_subdomain: "{{ portainer_name }}"-portainer_web_domain: "{{ user.domain }}"-portainer_web_port: "9000"-portainer_web_url: "{{ 'https://' + (portainer_web_subdomain + '.' + portainer_web_domain- if (portainer_web_subdomain | length > 0)- else portainer_web_domain) }}"+portainer_role_web_subdomain: "{{ portainer_name }}"+portainer_role_web_domain: "{{ user.domain }}"+portainer_role_web_port: "9000"+portainer_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='portainer') + '.' 
+ lookup('role_var', '_web_domain', role='portainer')+ if (lookup('role_var', '_web_subdomain', role='portainer') | length > 0)+ else lookup('role_var', '_web_domain', role='portainer')) }}" ################################ # DNS ################################ -portainer_dns_record: "{{ portainer_web_subdomain }}"-portainer_dns_zone: "{{ portainer_web_domain }}"-portainer_dns_proxy: "{{ dns.proxied }}"+portainer_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='portainer') }}"+portainer_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='portainer') }}"+portainer_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -portainer_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-portainer_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', portainer_name + '_name', default=portainer_name)- if (portainer_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-portainer_traefik_middleware_custom: ""-portainer_traefik_certresolver: "{{ traefik_default_certresolver }}"-portainer_traefik_enabled: true-portainer_traefik_api_enabled: true-portainer_traefik_api_endpoint: "PathPrefix(`/api`)"+portainer_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+portainer_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + portainer_name+ if (lookup('role_var', '_themepark_enabled', role='portainer') and global_themepark_plugin_enabled)+ else '') }}"+portainer_role_traefik_middleware_custom: ""+portainer_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+portainer_role_traefik_enabled: true+portainer_role_traefik_api_enabled: true+portainer_role_traefik_api_endpoint: "PathPrefix(`/api`)" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-portainer_themepark_enabled: 
false-portainer_themepark_theme: "{{ global_themepark_theme }}"+portainer_role_themepark_enabled: false+portainer_role_themepark_theme: "{{ global_themepark_theme }}" ################################ # Docker ################################ # Container-portainer_docker_container: "{{ portainer_name }}"+portainer_role_docker_container: "{{ portainer_name }}" # Image-portainer_docker_image_pull: true-portainer_docker_image_tag: "latest"-portainer_docker_image: "{{ ('portainer/portainer-ee:'- if portainer_business_edition- else 'portainer/portainer-ce:') + portainer_docker_image_tag }}"--# Ports-portainer_docker_ports_defaults: []-portainer_docker_ports_custom: []-portainer_docker_ports: "{{ portainer_docker_ports_defaults- + portainer_docker_ports_custom }}"+portainer_role_docker_image_pull: true+portainer_role_docker_image_tag: "latest"+portainer_role_docker_image_repo: "{{ 'portainer/portainer-ee'+ if lookup('role_var', '_business_edition', role='portainer')+ else 'portainer/portainer-ce' }}"+portainer_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='portainer') }}:{{ lookup('role_var', '_docker_image_tag', role='portainer') }}" # Envs-portainer_docker_envs_default:+portainer_role_docker_envs_default: TZ: "{{ tz }}"-portainer_docker_envs_custom: {}-portainer_docker_envs: "{{ portainer_docker_envs_default- | combine(portainer_docker_envs_custom) }}"--# Commands-portainer_docker_commands_default: []-portainer_docker_commands_custom: []-portainer_docker_commands: "{{ portainer_docker_commands_default- + portainer_docker_commands_custom }}"+portainer_role_docker_envs_custom: {}+portainer_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='portainer')+ | combine(lookup('role_var', '_docker_envs_custom', role='portainer')) }}" # Volumes-portainer_docker_volumes_default:- - "{{ portainer_paths_location }}:/data"+portainer_role_docker_volumes_default:+ - "{{ portainer_role_paths_location }}:/data" - 
"/var/run/docker.sock:/var/run/docker.sock"-portainer_docker_volumes_custom: []-portainer_docker_volumes: "{{ portainer_docker_volumes_default- + portainer_docker_volumes_custom }}"--# Devices-portainer_docker_devices_default: []-portainer_docker_devices_custom: []-portainer_docker_devices: "{{ portainer_docker_devices_default- + portainer_docker_devices_custom }}"--# Hosts-portainer_docker_hosts_default: {}-portainer_docker_hosts_custom: {}-portainer_docker_hosts: "{{ docker_hosts_common- | combine(portainer_docker_hosts_default)- | combine(portainer_docker_hosts_custom) }}"+portainer_role_docker_volumes_custom: []+portainer_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='portainer')+ + lookup('role_var', '_docker_volumes_custom', role='portainer') }}" # Labels-portainer_docker_labels_default: {}-portainer_docker_labels_custom: {}-portainer_docker_labels_themepark:- - '{ "traefik.http.middlewares.themepark-{{ lookup("vars", portainer_name + "_name", default=portainer_name) }}.plugin.themepark.app": "portainer" }'- - '{ "traefik.http.middlewares.themepark-{{ lookup("vars", portainer_name + "_name", default=portainer_name) }}.plugin.themepark.theme": "{{ lookup("vars", portainer_name + "_themepark_theme", default=portainer_themepark_theme) }}" }'-portainer_docker_labels: "{{ docker_labels_common- | combine(portainer_docker_labels_default)- | combine((lookup('vars', portainer_name + '_docker_labels_themepark', default=portainer_docker_labels_themepark)- if (portainer_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', portainer_name + '_docker_labels_custom', default=portainer_docker_labels_custom)) }}"+portainer_role_docker_labels_custom: {}+portainer_role_docker_labels: "{{ lookup('role_var', '_docker_labels_custom', role='portainer')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='portainer') and global_themepark_plugin_enabled)+ else {})) }}" # 
Hostname-portainer_docker_hostname: "{{ portainer_name }}"+portainer_role_docker_hostname: "{{ portainer_name }}" # Networks-portainer_docker_networks_alias: "{{ portainer_name }}"-portainer_docker_networks_default: []-portainer_docker_networks_custom: []-portainer_docker_networks: "{{ docker_networks_common- + portainer_docker_networks_default- + portainer_docker_networks_custom }}"--# Capabilities-portainer_docker_capabilities_default: []-portainer_docker_capabilities_custom: []-portainer_docker_capabilities: "{{ portainer_docker_capabilities_default- + portainer_docker_capabilities_custom }}"--# Security Opts-portainer_docker_security_opts_default: []-portainer_docker_security_opts_custom: []-portainer_docker_security_opts: "{{ portainer_docker_security_opts_default- + portainer_docker_security_opts_custom }}"+portainer_role_docker_networks_alias: "{{ portainer_name }}"+portainer_role_docker_networks_default: []+portainer_role_docker_networks_custom: []+portainer_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='portainer')+ + lookup('role_var', '_docker_networks_custom', role='portainer') }}" # Restart Policy-portainer_docker_restart_policy: unless-stopped+portainer_role_docker_restart_policy: unless-stopped # State-portainer_docker_state: started+portainer_role_docker_state: started
modified
roles/portainer/tasks/main.yml
@@ -10,16 +10,16 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" - name: "Check if config folder exists" ansible.builtin.stat:- path: "{{ portainer_paths_location }}"+ path: "{{ portainer_role_paths_location }}" register: portainer_config_stat - name: Create directories@@ -29,5 +29,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Setup Tasks- ansible.builtin.import_tasks: "subtasks/setup.yml"+ ansible.builtin.include_tasks: "subtasks/setup.yml" when: not portainer_config_stat.stat.exists and (user.pass | length >= 12) and (user.pass | length <= 72)
modified
roles/portainer/tasks/subtasks/setup.yml
@@ -13,8 +13,9 @@ - name: Setup | Create Saltbox user ansible.builtin.uri:- url: "http://{{ portainer_name }}:9000/api/users/admin/init"+ url: "http://{{ portainer_name }}:{{ portainer_role_web_port }}/api/users/admin/init" method: POST- return_content: yes+ return_content: true body_format: json body: {"Username": "{{ user.name }}", "Password": "{{ user.pass }}"}+ no_log: true
modified
roles/postgres/defaults/main.yml
@@ -17,124 +17,70 @@ # Settings ################################ -postgres_docker_env_password: "password4321"-postgres_docker_env_user: "{{ user.name }}"-postgres_docker_env_db: "saltbox"--# Memory limit in format <number>[<unit>].-# Number is a positive integer.-# Unit can be B (byte), K (kibibyte, 1024B), M (mebibyte), G (gibibyte), T (tebibyte), or P (pebibyte).-postgres_docker_memory_limit: 0+postgres_role_docker_env_password: "password4321"+postgres_role_docker_env_user: "{{ user.name }}"+postgres_role_docker_env_db: "saltbox" ################################ # Paths ################################ -postgres_paths_folder: "{{ postgres_name }}"-postgres_paths_location: "{{ server_appdata_path }}/{{ postgres_paths_folder }}"-postgres_paths_folders_list:- - "{{ postgres_paths_location }}"+postgres_role_paths_folder: "{{ postgres_name }}"+postgres_role_paths_location: "{{ server_appdata_path }}/{{ postgres_role_paths_folder }}"+postgres_role_paths_folders_list:+ - "{{ postgres_role_paths_location }}" ################################ # Docker ################################ # Container-postgres_docker_container: "{{ postgres_name }}"+postgres_role_docker_container: "{{ postgres_name }}" # Image-postgres_docker_image_pull: true-postgres_docker_image_tag: "17-alpine"-postgres_docker_image_repo: "postgres"-postgres_docker_image: "{{ lookup('vars', postgres_name + '_docker_image_repo', default=postgres_docker_image_repo)- + ':' + lookup('vars', postgres_name + '_docker_image_tag', default=postgres_docker_image_tag) }}"--# Ports-postgres_docker_ports_defaults: []-postgres_docker_ports_custom: []-postgres_docker_ports: "{{ postgres_docker_ports_defaults- + postgres_docker_ports_custom }}"+postgres_role_docker_image_pull: true+postgres_role_docker_image_tag: "17-alpine"+postgres_role_docker_image_repo: "postgres"+postgres_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='postgres') }}:{{ lookup('role_var', '_docker_image_tag', role='postgres') }}" # 
Envs-postgres_docker_envs_default:+postgres_role_docker_envs_default: TZ: "{{ tz }}" PGDATA: "/data"- POSTGRES_PASSWORD: "{{ lookup('vars', postgres_name + '_docker_env_password', default=postgres_docker_env_password) }}"- POSTGRES_USER: "{{ lookup('vars', postgres_name + '_docker_env_user', default=postgres_docker_env_user) }}"- POSTGRES_DB: "{{ lookup('vars', postgres_name + '_docker_env_db', default=postgres_docker_env_db) }}"-postgres_docker_envs_custom: {}-postgres_docker_envs: "{{ lookup('vars', postgres_name + '_docker_envs_default', default=postgres_docker_envs_default)- | combine(lookup('vars', postgres_name + '_docker_envs_custom', default=postgres_docker_envs_custom)) }}"--# Commands-postgres_docker_commands_default: []-postgres_docker_commands_custom: []-postgres_docker_commands: "{{ lookup('vars', postgres_name + '_docker_commands_default', default=postgres_docker_commands_default)- + lookup('vars', postgres_name + '_docker_commands_custom', default=postgres_docker_commands_custom) }}"+ POSTGRES_PASSWORD: "{{ lookup('role_var', '_docker_env_password', role='postgres') }}"+ POSTGRES_USER: "{{ lookup('role_var', '_docker_env_user', role='postgres') }}"+ POSTGRES_DB: "{{ lookup('role_var', '_docker_env_db', role='postgres') }}"+postgres_role_docker_envs_custom: {}+postgres_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='postgres')+ | combine(lookup('role_var', '_docker_envs_custom', role='postgres')) }}" # Volumes-postgres_docker_volumes_default:- - "{{ postgres_paths_location }}:/data"+postgres_role_docker_volumes_default:+ - "{{ postgres_role_paths_location }}:/data" - "/etc/passwd:/etc/passwd:ro"-postgres_docker_volumes_custom: []-postgres_docker_volumes: "{{ lookup('vars', postgres_name + '_docker_volumes_default', default=postgres_docker_volumes_default)- + lookup('vars', postgres_name + '_docker_volumes_custom', default=postgres_docker_volumes_custom) }}"--# Devices-postgres_docker_devices_default: 
[]-postgres_docker_devices_custom: []-postgres_docker_devices: "{{ lookup('vars', postgres_name + '_docker_devices_default', default=postgres_docker_devices_default)- + lookup('vars', postgres_name + '_docker_devices_custom', default=postgres_docker_devices_custom) }}"--# Hosts-postgres_docker_hosts_default: {}-postgres_docker_hosts_custom: {}-postgres_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', postgres_name + '_docker_hosts_default', default=postgres_docker_hosts_default))- | combine(lookup('vars', postgres_name + '_docker_hosts_custom', default=postgres_docker_hosts_custom)) }}"--# Labels-postgres_docker_labels_default: {}-postgres_docker_labels_custom: {}-postgres_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', postgres_name + '_docker_labels_default', default=postgres_docker_labels_default))- | combine(lookup('vars', postgres_name + '_docker_labels_custom', default=postgres_docker_labels_custom)) }}"+postgres_role_docker_volumes_custom: []+postgres_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='postgres')+ + lookup('role_var', '_docker_volumes_custom', role='postgres') }}" # Hostname-postgres_docker_hostname: "{{ postgres_name }}"--# Network Mode-postgres_docker_network_mode_default: "{{ docker_networks_name_common }}"-postgres_docker_network_mode: "{{ lookup('vars', postgres_name + '_docker_network_mode_default', default=postgres_docker_network_mode_default) }}"+postgres_role_docker_hostname: "{{ postgres_name }}" # Networks-postgres_docker_networks_alias: "{{ postgres_name }}"-postgres_docker_networks_default: []-postgres_docker_networks_custom: []-postgres_docker_networks: "{{ docker_networks_common- + lookup('vars', postgres_name + '_docker_networks_default', default=postgres_docker_networks_default)- + lookup('vars', postgres_name + '_docker_networks_dcustom', default=postgres_docker_networks_custom) }}"--# Capabilities-postgres_docker_capabilities_default: 
[]-postgres_docker_capabilities_custom: []-postgres_docker_capabilities: "{{ lookup('vars', postgres_name + '_docker_capabilities_default', default=postgres_docker_capabilities_default)- + lookup('vars', postgres_name + '_docker_capabilities_custom', default=postgres_docker_capabilities_custom) }}"--# Security Opts-postgres_docker_security_opts_default: []-postgres_docker_security_opts_custom: []-postgres_docker_security_opts: "{{ lookup('vars', postgres_name + '_docker_security_opts_default', default=postgres_docker_security_opts_default)- + lookup('vars', postgres_name + '_docker_security_opts_custom', default=postgres_docker_security_opts_custom) }}"+postgres_role_docker_networks_alias: "{{ postgres_name }}"+postgres_role_docker_networks_default: []+postgres_role_docker_networks_custom: []+postgres_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='postgres')+ + lookup('role_var', '_docker_networks_custom', role='postgres') }}" # Restart Policy-postgres_docker_restart_policy: unless-stopped+postgres_role_docker_restart_policy: unless-stopped # State-postgres_docker_state: started+postgres_role_docker_state: started # User-postgres_docker_user: "{{ uid }}:{{ gid }}"+postgres_role_docker_user: "{{ uid }}:{{ gid }}" -# Memory Limit-postgres_docker_memory: "{{ lookup('vars', postgres_name + '_docker_memory_limit', default=postgres_docker_memory_limit) }}"+# SHM size+postgres_role_docker_shm_size: "128M"
modified
roles/postgres/tasks/main2.yml
@@ -11,28 +11,33 @@ ansible.builtin.set_fact: postgres_major_version: "" postgres_docker_tag_major_version: ""- postgres_original_image_repo: "{{ true if postgres_docker_image_repo == 'postgres' else false }}"+ postgres_original_image_repo: "{{ true if lookup('role_var', '_docker_image_repo', role='postgres') == 'postgres' else false }}" - name: Supported image repository when: postgres_original_image_repo block:- - name: "Check if '{{ postgres_paths_location }}' exists"+ - name: "Check if '{{ lookup('role_var', '_paths_location', role='postgres') }}' exists" ansible.builtin.stat:- path: "{{ postgres_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='postgres') }}" register: stat_postgres_path + - name: "Check if '{{ lookup('role_var', '_paths_location', role='postgres') }}/PG_VERSION' exists"+ ansible.builtin.stat:+ path: "{{ lookup('role_var', '_paths_location', role='postgres') }}/PG_VERSION"+ register: stat_postgres_version_path+ - name: Check for upgrade- when: stat_postgres_path.stat.exists+ when: stat_postgres_path.stat.exists and stat_postgres_version_path.stat.exists block: - name: Read PG_VERSION file ansible.builtin.slurp:- src: "{{ postgres_paths_location }}/PG_VERSION"+ src: "{{ lookup('role_var', '_paths_location', role='postgres') }}/PG_VERSION" register: pg_version_file - name: Define installed and desired Postgres versions ansible.builtin.set_fact: postgres_major_version: "{{ pg_version_file['content'] | b64decode | trim }}"- postgres_docker_tag_major_version: "{{ lookup('vars', postgres_name + '_docker_image_tag', default=postgres_docker_image_tag).split('-')[0] | regex_replace('^([0-9]+).*', '\\1') }}"+ postgres_docker_tag_major_version: "{{ lookup('role_var', '_docker_image_tag', role='postgres').split('-')[0] | regex_replace('^([0-9]+).*', '\\1') }}" - name: Display PostgreSQL version ansible.builtin.debug:@@ -42,8 +47,8 @@ ansible.builtin.assert: that: - postgres_docker_tag_major_version is regex("^[0-9]+$")- fail_msg: 
"Invalid version specified for 'postgres_docker_image_tag': {{ lookup('vars', postgres_name + '_docker_image_tag', default=postgres_docker_image_tag) }}"- success_msg: "Valid version specified for 'postgres_docker_image_tag': {{ lookup('vars', postgres_name + '_docker_image_tag', default=postgres_docker_image_tag) }}"+ fail_msg: "Invalid version specified for 'postgres_docker_image_tag': {{ lookup('role_var', '_docker_image_tag', role='postgres') }}"+ success_msg: "Valid version specified for 'postgres_docker_image_tag': {{ lookup('role_var', '_docker_image_tag', role='postgres') }}" - name: Upgrade PostgreSQL when: stat_postgres_path.stat.exists and (postgres_major_version != postgres_docker_tag_major_version)@@ -56,39 +61,39 @@ - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" - - name: "Check if '{{ postgres_paths_location }}_backup' exists"+ - name: "Check if '{{ lookup('role_var', '_paths_location', role='postgres') }}_backup' exists" ansible.builtin.stat:- path: "{{ postgres_paths_location }}_backup"+ path: "{{ lookup('role_var', '_paths_location', role='postgres') }}_backup" register: stat_postgres_backup_path - name: Fail if backup location already exists ansible.builtin.fail: msg: - "A backup of the postgres folder already exists, clean it up if it was from a previously successful upgrade. 
Otherwise ask for help on the discord."- - "Path is {{ postgres_paths_location }}_backup"+ - "Path is {{ lookup('role_var', '_paths_location', role='postgres') }}_backup" when: stat_postgres_backup_path.stat.exists - name: Move Postgres data folder- ansible.builtin.shell: mv "{{ postgres_paths_location }}" "{{ postgres_paths_location }}_backup"+ ansible.builtin.shell: mv "{{ lookup('role_var', '_paths_location', role='postgres') }}" "{{ lookup('role_var', '_paths_location', role='postgres') }}_backup" - name: Create directories ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - - name: "Create Docker container (Desired tag: {{ lookup('vars', postgres_name + '_docker_image_tag', default=postgres_docker_image_tag) }})"+ - name: "Create Docker container (Desired tag: {{ lookup('role_var', '_docker_image_tag', role='postgres') }})" ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" vars:- postgres_docker_container: "{{ postgres_name + '_new' }}"- postgres_docker_networks_alias: "{{ postgres_name + '_new' }}"+ postgres_role_docker_container: "{{ postgres_name + '_new' }}"+ postgres_role_docker_networks_alias: "{{ postgres_name + '_new' }}" - name: "Create Docker container (Previous tag: {{ postgres_major_version }}-alpine)" ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" vars:- postgres_docker_container: "{{ postgres_name + '_old' }}"- postgres_docker_networks_alias: "{{ postgres_name + '_old' }}"- postgres_docker_image: "{{ lookup('vars', postgres_name + '_docker_image_repo', default=postgres_docker_image_repo)- + ':' + postgres_major_version + '-alpine' }}"- postgres_docker_volumes_default:- - "{{ postgres_paths_location }}_backup:/data"+ postgres_role_docker_container: "{{ postgres_name + '_old' }}"+ postgres_role_docker_networks_alias: "{{ postgres_name + '_old' }}"+ postgres_role_docker_image: "{{ lookup('role_var', 
'_docker_image_repo', role='postgres')+ + ':' + postgres_major_version + '-alpine' }}"+ postgres_role_docker_volumes_default:+ - "{{ postgres_role_paths_location }}_backup:/data" - "/etc/passwd:/etc/passwd:ro" - name: Sleep for 60 seconds@@ -111,13 +116,16 @@ - name: Update user password for SCRAM authentication in new container ansible.builtin.shell: |- docker exec {{ postgres_name }}_new psql -U {{ lookup('vars', postgres_name + '_docker_env_user', default=postgres_docker_env_user) }} -d {{ lookup('vars', postgres_name + '_docker_env_db', default=postgres_docker_env_db) }} -c "ALTER USER {{ lookup('vars', postgres_name + '_docker_env_user', default=postgres_docker_env_user) }} WITH PASSWORD '{{ lookup('vars', postgres_name + '_docker_env_password', default=postgres_docker_env_password) }}';"+ docker exec {{ postgres_name }}_new psql -U {{ lookup('role_var', '_docker_env_user', role='postgres') }} -d {{ lookup('role_var', '_docker_env_db', role='postgres') }} -c "ALTER USER {{ lookup('role_var', '_docker_env_user', role='postgres') }} WITH PASSWORD '{{ lookup('role_var', '_docker_env_password', role='postgres') }}';" when: (postgres_major_version | int < 14) and (postgres_docker_tag_major_version | int >= 14) - name: Remove temporary containers community.docker.docker_container: name: "{{ item }}" state: absent+ container_default_behavior: compatibility+ stop_timeout: "{{ lookup('role_var', '_docker_stop_timeout', default='180') }}"+ tls_hostname: localhost loop: - "{{ postgres_name }}_old" - "{{ postgres_name }}_new"
modified
roles/pre_tasks/tasks/main.yml
@@ -8,10 +8,10 @@ ######################################################################### --- - name: APT Tasks- ansible.builtin.import_tasks: "subtasks/apt.yml"+ ansible.builtin.include_tasks: "subtasks/apt.yml" - name: Git Tasks- ansible.builtin.import_tasks: "subtasks/git.yml"+ ansible.builtin.include_tasks: "subtasks/git.yml" - name: Variables Tasks ansible.builtin.include_tasks: "subtasks/variables.yml"
modified
roles/pre_tasks/tasks/subtasks/git.yml
@@ -23,5 +23,5 @@ when: ('preinstall' in ansible_run_tags) or (not '/srv/git/sb' in git_config.stdout) - name: Git | Set Sandbox repository as safe directory- ansible.builtin.shell: git config --global --add safe.directory /opt/sandbox- when: ('preinstall' in ansible_run_tags) or (not '/opt/sandbox' in git_config.stdout)+ ansible.builtin.shell: git config --global --add safe.directory {{ server_appdata_path }}/sandbox+ when: ('preinstall' in ansible_run_tags) or (not server_appdata_path + '/sandbox' in git_config.stdout)
modified
roles/pre_tasks/tasks/subtasks/variables.yml
@@ -16,10 +16,10 @@ with_items: "{{ pre_tasks_saltbox_roles.stdout_lines }}" - name: Sandbox- when: (playbook_dir == "/opt/saltbox_mod")+ when: (playbook_dir == pre_tasks_saltbox_mod_location) block: - name: Find Sandbox roles- ansible.builtin.shell: "find /opt/sandbox/roles -type d -name 'defaults'"+ ansible.builtin.shell: "find {{ server_appdata_path }}/sandbox/roles -type d -name 'defaults'" register: pre_tasks_sandbox_roles - name: Include Sandbox role default vars
modified
roles/prometheus/defaults/main.yml
@@ -18,143 +18,112 @@ ################################ # https://prometheus.io/docs/prometheus/latest/storage/ -prometheus_retention: "15d"-prometheus_size: "0"+prometheus_role_retention: "15d"+prometheus_role_size: "0" ################################ # Paths ################################ -prometheus_paths_folder: "{{ prometheus_name }}"-prometheus_paths_location: "{{ server_appdata_path }}/{{ prometheus_paths_folder }}"-prometheus_config_path: "{{ prometheus_paths_location }}/prometheus.yml"-prometheus_paths_folders_list:- - "{{ prometheus_paths_location }}"- - "{{ prometheus_paths_location }}/data"+prometheus_role_paths_folder: "{{ prometheus_name }}"+prometheus_role_paths_location: "{{ server_appdata_path }}/{{ prometheus_role_paths_folder }}"+prometheus_role_paths_config_path: "{{ prometheus_role_paths_location }}/prometheus.yml"+prometheus_role_paths_folders_list:+ - "{{ prometheus_role_paths_location }}"+ - "{{ prometheus_role_paths_location }}/data" ################################ # Web ################################ -prometheus_web_subdomain: "{{ prometheus_name }}"-prometheus_web_domain: "{{ user.domain }}"-prometheus_web_port: "9090"-prometheus_web_url: "{{ 'https://' + (prometheus_web_subdomain + '.' + prometheus_web_domain- if (prometheus_web_subdomain | length > 0)- else prometheus_web_domain) }}"+prometheus_role_web_subdomain: "{{ prometheus_name }}"+prometheus_role_web_domain: "{{ user.domain }}"+prometheus_role_web_port: "9090"+prometheus_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='prometheus') + '.' 
+ lookup('role_var', '_web_domain', role='prometheus')+ if (lookup('role_var', '_web_subdomain', role='prometheus') | length > 0)+ else lookup('role_var', '_web_domain', role='prometheus')) }}" ################################ # DNS ################################ -prometheus_dns_record: "{{ prometheus_web_subdomain }}"-prometheus_dns_zone: "{{ prometheus_web_domain }}"-prometheus_dns_proxy: "{{ dns.proxied }}"+prometheus_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='prometheus') }}"+prometheus_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='prometheus') }}"+prometheus_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -prometheus_traefik_sso_middleware: ""-prometheus_traefik_middleware_default: "{{ traefik_default_middleware + ',prometheus-auth' }}"-prometheus_traefik_middleware_custom: ""-prometheus_traefik_certresolver: "{{ traefik_default_certresolver }}"-prometheus_traefik_enabled: true-prometheus_traefik_api_enabled: false-prometheus_traefik_api_endpoint: ""+prometheus_role_traefik_sso_middleware: ""+prometheus_role_traefik_middleware_default: "{{ traefik_default_middleware + ',prometheus-auth' }}"+prometheus_role_traefik_middleware_custom: ""+prometheus_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+prometheus_role_traefik_enabled: true+prometheus_role_traefik_api_enabled: false+prometheus_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-prometheus_docker_container: "{{ prometheus_name }}"+prometheus_role_docker_container: "{{ prometheus_name }}" # Image-prometheus_docker_image_pull: true-prometheus_docker_image_tag: "latest"-prometheus_docker_image: "prom/prometheus:{{ prometheus_docker_image_tag }}"--# Ports-prometheus_docker_ports_defaults: []-prometheus_docker_ports_custom: []-prometheus_docker_ports: "{{ prometheus_docker_ports_defaults- + prometheus_docker_ports_custom 
}}"+prometheus_role_docker_image_pull: true+prometheus_role_docker_image_tag: "latest"+prometheus_role_docker_image_repo: "prom/prometheus"+prometheus_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='prometheus') }}:{{ lookup('role_var', '_docker_image_tag', role='prometheus') }}" # Envs-prometheus_docker_envs_default:+prometheus_role_docker_envs_default: TZ: "{{ tz }}"-prometheus_docker_envs_custom: {}-prometheus_docker_envs: "{{ prometheus_docker_envs_default- | combine(prometheus_docker_envs_custom) }}"+prometheus_role_docker_envs_custom: {}+prometheus_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='prometheus')+ | combine(lookup('role_var', '_docker_envs_custom', role='prometheus')) }}" # Commands-prometheus_docker_commands_default:+prometheus_role_docker_commands_default: - "--config.file=/etc/prometheus/prometheus.yml" - "--storage.tsdb.path=/data"- - "--storage.tsdb.retention.time={{ prometheus_retention }}"- - "--storage.tsdb.retention.size={{ prometheus_size }}"-prometheus_docker_commands_custom: []-prometheus_docker_commands: "{{ prometheus_docker_commands_default- + prometheus_docker_commands_custom }}"+ - "--storage.tsdb.retention.time={{ lookup('role_var', '_retention', role='prometheus') }}"+ - "--storage.tsdb.retention.size={{ lookup('role_var', '_size', role='prometheus') }}"+prometheus_role_docker_commands_custom: []+prometheus_role_docker_commands: "{{ lookup('role_var', '_docker_commands_default', role='prometheus')+ + lookup('role_var', '_docker_commands_custom', role='prometheus') }}" # Volumes-prometheus_docker_volumes_default:- - "{{ prometheus_paths_location }}:/etc/prometheus"- - "{{ prometheus_paths_location }}/data:/data"-prometheus_docker_volumes_custom: []-prometheus_docker_volumes: "{{ prometheus_docker_volumes_default- + prometheus_docker_volumes_custom }}"--# Devices-prometheus_docker_devices_default: []-prometheus_docker_devices_custom: []-prometheus_docker_devices: "{{ 
prometheus_docker_devices_default- + prometheus_docker_devices_custom }}"--# Hosts-prometheus_docker_hosts_default: {}-prometheus_docker_hosts_custom: {}-prometheus_docker_hosts: "{{ docker_hosts_common- | combine(prometheus_docker_hosts_default)- | combine(prometheus_docker_hosts_custom) }}"+prometheus_role_docker_volumes_default:+ - "{{ prometheus_role_paths_location }}:/etc/prometheus"+ - "{{ prometheus_role_paths_location }}/data:/data"+prometheus_role_docker_volumes_custom: []+prometheus_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='prometheus')+ + lookup('role_var', '_docker_volumes_custom', role='prometheus') }}" # Labels-prometheus_docker_labels_default:+prometheus_role_docker_labels_default: traefik.http.middlewares.prometheus-auth.basicauth.usersfile: "/etc/traefik/auth"-prometheus_docker_labels_custom: {}-prometheus_docker_labels: "{{ docker_labels_common- | combine(prometheus_docker_labels_default)- | combine(prometheus_docker_labels_custom) }}"+prometheus_role_docker_labels_custom: {}+prometheus_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='prometheus')+ | combine(lookup('role_var', '_docker_labels_custom', role='prometheus')) }}" # Hostname-prometheus_docker_hostname: "{{ prometheus_name }}"+prometheus_role_docker_hostname: "{{ prometheus_name }}" # Networks-prometheus_docker_networks_alias: "{{ prometheus_name }}"-prometheus_docker_networks_default: []-prometheus_docker_networks_custom: []-prometheus_docker_networks: "{{ docker_networks_common- + prometheus_docker_networks_default- + prometheus_docker_networks_custom }}"--# Capabilities-prometheus_docker_capabilities_default: []-prometheus_docker_capabilities_custom: []-prometheus_docker_capabilities: "{{ prometheus_docker_capabilities_default- + prometheus_docker_capabilities_custom }}"--# Security Opts-prometheus_docker_security_opts_default: []-prometheus_docker_security_opts_custom: []-prometheus_docker_security_opts: "{{ 
prometheus_docker_security_opts_default- + prometheus_docker_security_opts_custom }}"+prometheus_role_docker_networks_alias: "{{ prometheus_name }}"+prometheus_role_docker_networks_default: []+prometheus_role_docker_networks_custom: []+prometheus_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='prometheus')+ + lookup('role_var', '_docker_networks_custom', role='prometheus') }}" # Restart Policy-prometheus_docker_restart_policy: unless-stopped+prometheus_role_docker_restart_policy: unless-stopped # State-prometheus_docker_state: started+prometheus_role_docker_state: started # User-prometheus_docker_user: "{{ uid }}:{{ gid }}"+prometheus_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/prometheus/tasks/main.yml
@@ -18,9 +18,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -31,7 +31,7 @@ - name: Import 'prometheus.yml' ansible.builtin.template: src: prometheus.yml.j2- dest: "{{ prometheus_config_path }}"+ dest: "{{ lookup('role_var', '_paths_config_path', role='prometheus') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0644"
modified
roles/prometheus/templates/prometheus.yml.j2
@@ -10,7 +10,7 @@ static_configs: - targets: ['cadvisor:8080'] -{% if traefik.metrics %}+{% if lookup('role_var', '_metrics_enabled', role='traefik') %} - job_name: traefik scheme: https metrics_path: /prometheus@@ -18,5 +18,5 @@ username: '{{ user.name }}' password: '{{ user.pass }}' static_configs:- - targets: ['{{ traefik_metrics_subdomain }}.{{ traefik_metrics_domain }}']+ - targets: ['{{ lookup('role_var', '_metrics_subdomain', role='traefik') }}.{{ lookup('role_var', '_metrics_domain', role='traefik') }}'] {% endif %}
modified
roles/prowlarr/defaults/main.yml
@@ -17,151 +17,114 @@ # Settings ################################ -prowlarr_external_auth: true+prowlarr_role_external_auth: true ################################ # Paths ################################ -prowlarr_paths_folder: "{{ prowlarr_name }}"-prowlarr_paths_location: "{{ server_appdata_path }}/{{ prowlarr_paths_folder }}"-prowlarr_paths_folders_list:- - "{{ prowlarr_paths_location }}"+prowlarr_role_paths_folder: "{{ prowlarr_name }}"+prowlarr_role_paths_location: "{{ server_appdata_path }}/{{ prowlarr_role_paths_folder }}"+prowlarr_role_paths_folders_list:+ - "{{ prowlarr_role_paths_location }}" ################################ # Web ################################ -prowlarr_web_subdomain: "{{ prowlarr_name }}"-prowlarr_web_domain: "{{ user.domain }}"-prowlarr_web_port: "9696"-prowlarr_web_url: "{{ 'https://' + (prowlarr_web_subdomain + '.' + prowlarr_web_domain- if (prowlarr_web_subdomain | length > 0)- else prowlarr_web_domain) }}"+prowlarr_role_web_subdomain: "{{ prowlarr_name }}"+prowlarr_role_web_domain: "{{ user.domain }}"+prowlarr_role_web_port: "9696"+prowlarr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='prowlarr') + '.' 
+ lookup('role_var', '_web_domain', role='prowlarr')+ if (lookup('role_var', '_web_subdomain', role='prowlarr') | length > 0)+ else lookup('role_var', '_web_domain', role='prowlarr')) }}" ################################ # DNS ################################ -prowlarr_dns_record: "{{ prowlarr_web_subdomain }}"-prowlarr_dns_zone: "{{ prowlarr_web_domain }}"-prowlarr_dns_proxy: "{{ dns.proxied }}"+prowlarr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='prowlarr') }}"+prowlarr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='prowlarr') }}"+prowlarr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -prowlarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-prowlarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', prowlarr_name + '_name', default=prowlarr_name)- if (prowlarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-prowlarr_traefik_middleware_custom: ""-prowlarr_traefik_certresolver: "{{ traefik_default_certresolver }}"-prowlarr_traefik_enabled: true-prowlarr_traefik_api_enabled: true-prowlarr_traefik_api_endpoint: "PathRegexp(`/[0-9]+/api`) || PathRegexp(`/[0-9]+/download`) || PathPrefix(`/api`) || PathPrefix(`/ping`)"+prowlarr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+prowlarr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + prowlarr_name+ if (lookup('role_var', '_themepark_enabled', role='prowlarr') and global_themepark_plugin_enabled)+ else '') }}"+prowlarr_role_traefik_middleware_custom: ""+prowlarr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+prowlarr_role_traefik_enabled: true+prowlarr_role_traefik_api_enabled: true+prowlarr_role_traefik_api_endpoint: "PathRegexp(`/[0-9]+/api`) || PathRegexp(`/[0-9]+/download`) || PathPrefix(`/api`) || PathPrefix(`/ping`)" ################################-# THEME+# Theme 
################################ # Options can be found at https://github.com/themepark-dev/theme.park-prowlarr_themepark_enabled: false-prowlarr_themepark_app: "prowlarr"-prowlarr_themepark_theme: "{{ global_themepark_theme }}"-prowlarr_themepark_domain: "{{ global_themepark_domain }}"-prowlarr_themepark_addons: []+prowlarr_role_themepark_enabled: false+prowlarr_role_themepark_app: "prowlarr"+prowlarr_role_themepark_theme: "{{ global_themepark_theme }}"+prowlarr_role_themepark_domain: "{{ global_themepark_domain }}"+prowlarr_role_themepark_addons: [] ################################ # Docker ################################ # Container-prowlarr_docker_container: "{{ prowlarr_name }}"+prowlarr_role_docker_container: "{{ prowlarr_name }}" # Image-prowlarr_docker_image_pull: true-prowlarr_docker_image_tag: "release"-prowlarr_docker_image: "ghcr.io/hotio/prowlarr:{{ prowlarr_docker_image_tag }}"--# Ports-prowlarr_docker_ports_defaults: []-prowlarr_docker_ports_custom: []-prowlarr_docker_ports: "{{ prowlarr_docker_ports_defaults- + prowlarr_docker_ports_custom }}"+prowlarr_role_docker_image_pull: true+prowlarr_role_docker_image_tag: "release"+prowlarr_role_docker_image_repo: "ghcr.io/hotio/prowlarr"+prowlarr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='prowlarr') }}:{{ lookup('role_var', '_docker_image_tag', role='prowlarr') }}" # Envs-prowlarr_docker_envs_default:+prowlarr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-prowlarr_docker_envs_custom: {}-prowlarr_docker_envs: "{{ prowlarr_docker_envs_default- | combine(prowlarr_docker_envs_custom) }}"--# Commands-prowlarr_docker_commands_default: []-prowlarr_docker_commands_custom: []-prowlarr_docker_commands: "{{ prowlarr_docker_commands_default- + prowlarr_docker_commands_custom }}"+prowlarr_role_docker_envs_custom: {}+prowlarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='prowlarr')+ | combine(lookup('role_var', 
'_docker_envs_custom', role='prowlarr')) }}" # Volumes-prowlarr_docker_volumes_default:- - "{{ prowlarr_paths_location }}:/config"-prowlarr_docker_volumes_custom: []-prowlarr_docker_volumes: "{{ prowlarr_docker_volumes_default- + prowlarr_docker_volumes_custom }}"--# Devices-prowlarr_docker_devices_default: []-prowlarr_docker_devices_custom: []-prowlarr_docker_devices: "{{ prowlarr_docker_devices_default- + prowlarr_docker_devices_custom }}"--# Hosts-prowlarr_docker_hosts_default: {}-prowlarr_docker_hosts_custom: {}-prowlarr_docker_hosts: "{{ docker_hosts_common- | combine(prowlarr_docker_hosts_default)- | combine(prowlarr_docker_hosts_custom) }}"+prowlarr_role_docker_volumes_default:+ - "{{ prowlarr_role_paths_location }}:/config"+prowlarr_role_docker_volumes_custom: []+prowlarr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='prowlarr')+ + lookup('role_var', '_docker_volumes_custom', role='prowlarr') }}" # Labels-prowlarr_docker_labels_default: {}-prowlarr_docker_labels_custom: {}-prowlarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', prowlarr_name + '_docker_labels_default', default=prowlarr_docker_labels_default))- | combine((traefik_themepark_labels- if (prowlarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', prowlarr_name + '_docker_labels_custom', default=prowlarr_docker_labels_custom)) }}"+prowlarr_role_docker_labels_default: {}+prowlarr_role_docker_labels_custom: {}+prowlarr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='prowlarr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='prowlarr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='prowlarr')) }}" # Hostname-prowlarr_docker_hostname: "{{ prowlarr_name }}"+prowlarr_role_docker_hostname: "{{ prowlarr_name }}" # Networks-prowlarr_docker_networks_alias: "{{ prowlarr_name 
}}"-prowlarr_docker_networks_default: []-prowlarr_docker_networks_custom: []-prowlarr_docker_networks: "{{ docker_networks_common- + prowlarr_docker_networks_default- + prowlarr_docker_networks_custom }}"--# Capabilities-prowlarr_docker_capabilities_default: []-prowlarr_docker_capabilities_custom: []-prowlarr_docker_capabilities: "{{ prowlarr_docker_capabilities_default- + prowlarr_docker_capabilities_custom }}"--# Security Opts-prowlarr_docker_security_opts_default: []-prowlarr_docker_security_opts_custom: []-prowlarr_docker_security_opts: "{{ prowlarr_docker_security_opts_default- + prowlarr_docker_security_opts_custom }}"+prowlarr_role_docker_networks_alias: "{{ prowlarr_name }}"+prowlarr_role_docker_networks_default: []+prowlarr_role_docker_networks_custom: []+prowlarr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='prowlarr')+ + lookup('role_var', '_docker_networks_custom', role='prowlarr') }}" # Restart Policy-prowlarr_docker_restart_policy: unless-stopped+prowlarr_role_docker_restart_policy: unless-stopped # State-prowlarr_docker_state: started+prowlarr_role_docker_state: started
modified
roles/prowlarr/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -24,5 +24,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (prowlarr_traefik_sso_middleware | length > 0) and prowlarr_external_auth+ ansible.builtin.include_tasks: "subtasks/auth.yml"+ when: (lookup('role_var', '_traefik_sso_middleware', role='prowlarr') | length > 0) and lookup('role_var', '_external_auth', role='prowlarr')
modified
roles/prowlarr/tasks/subtasks/auth.yml
@@ -9,7 +9,7 @@ --- - name: Auth | Wait for 'config.xml' to be created ansible.builtin.wait_for:- path: "/opt/{{ prowlarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ prowlarr_name }}/config.xml" state: present - name: Auth | Wait for 10 seconds@@ -18,7 +18,7 @@ - name: Auth | Lookup AuthenticationMethod value community.general.xml:- path: "/opt/{{ prowlarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ prowlarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" content: "text" register: xmlresp@@ -28,7 +28,7 @@ block: - name: Auth | Change the 'AuthenticationMethod' attribute to 'External' community.general.xml:- path: "/opt/{{ prowlarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ prowlarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" value: "External"
modified
roles/python/tasks/main.yml
@@ -20,11 +20,11 @@ ansible.builtin.set_fact: expected_system_python: >- {{ '3.8'- if (ansible_distribution_version is version('20.04', '=='))+ if (ansible_facts['distribution_version'] is version('20.04', '==')) else '3.10'- if (ansible_distribution_version is version('22.04', '=='))+ if (ansible_facts['distribution_version'] is version('22.04', '==')) else '3.12'- if (ansible_distribution_version is version('24.04', '=='))+ if (ansible_facts['distribution_version'] is version('24.04', '==')) else 'unknown' }} - name: Assert system Python matches Ubuntu release@@ -33,11 +33,11 @@ - system_python_short == expected_system_python fail_msg: | System Python version mismatch detected!- Expected Python {{ expected_system_python }} for Ubuntu {{ ansible_distribution_version }}+ Expected Python {{ expected_system_python }} for Ubuntu {{ ansible_facts['distribution_version'] }} but found Python {{ system_python_short }}. The system Python version has been changed from the default. This role cannot safely proceed with cleanup.- success_msg: "System Python {{ system_python_short }} matches expected version {{ expected_system_python }} for Ubuntu {{ ansible_distribution_version }}"+ success_msg: "System Python {{ system_python_short }} matches expected version {{ expected_system_python }} for Ubuntu {{ ansible_facts['distribution_version'] }}" - name: Legacy Deadsnakes cleanup when:
modified
roles/qbittorrent/defaults/main.yml
@@ -11,220 +11,188 @@ ################################ qbittorrent_instances: ["qbittorrent"]-qbittorrent_host_install: false ################################ # Settings ################################ -qbittorrent_webui_custom_headers_enabled: "{{ 'true' if qbittorrent_themepark_enabled and global_themepark_plugin_enabled else 'false' }}"-qbittorrent_webui_custom_headers_default: "{{ (qbittorrent_themepark_headers if qbittorrent_themepark_enabled and global_themepark_plugin_enabled else '') + qbittorrent_webui_custom_headers_custom }}"-qbittorrent_webui_custom_headers_custom: ""-qbittorrent_webui_custom_headers: "{{ (qbittorrent_webui_custom_headers_default | trim + ' ' + qbittorrent_webui_custom_headers_custom | trim) | trim }}"+qbittorrent_role_host_install: false+qbittorrent_role_webui_custom_headers_enabled: "{{ 'true' if lookup('role_var', '_themepark_enabled', role='qbittorrent') and global_themepark_plugin_enabled else 'false' }}"+qbittorrent_role_webui_custom_headers_default: "{{ (qbittorrent_role_themepark_headers if lookup('role_var', '_themepark_enabled', role='qbittorrent') and global_themepark_plugin_enabled else '') }}"+qbittorrent_role_webui_custom_headers_custom: ""+qbittorrent_role_webui_custom_headers: "{{ (lookup('role_var', '_webui_custom_headers_default', role='qbittorrent') | trim + ' ' + lookup('role_var', '_webui_custom_headers_custom', role='qbittorrent') | trim) | trim }}" # Options are: Delete or MoveToTrash-qbittorrent_torrent_content_remove_option: "Delete"+qbittorrent_role_torrent_content_remove_option: "Delete" ################################ # Host Install ################################ # Options are: libtorrent1 (latest), libtorrent2 (latest) or legacy (4.3.9)-qbittorrent_host_branch: libtorrent1+qbittorrent_role_host_branch: libtorrent1 # Example being "release-4.4.5_v1.2.18" # If this is set then the above branch logic is ignored-qbittorrent_host_specific_version: ""+qbittorrent_role_host_specific_version: "" # Lookup 
variables-qbittorrent_host_download_endpoint: "{{ 'https://github.com/userdocs/qbittorrent-nox-static/releases/download/'- if qbittorrent_host_branch != 'legacy'- else 'https://github.com/userdocs/qbittorrent-nox-static-legacy/releases/download/' }}"-qbittorrent_host_download_url: "{{ qbittorrent_host_download_endpoint }}{{ qbittorrent_host_specific_version- if (qbittorrent_host_specific_version | length > 0)- else qbittorret_release_version.stdout }}/x86_64-qbittorrent-nox"-qbittorrent_host_release_url: "{{ 'https://github.com/userdocs/qbittorrent-nox-static/releases/latest/download/dependency-version.json'- if qbittorrent_host_branch != 'legacy'- else 'https://github.com/userdocs/qbittorrent-nox-static-legacy/releases/latest/download/dependency-version.json' }}"-qbittorrent_host_lookup_libtorrent1: 'release-\(.qbittorrent)_v\(.libtorrent_1_2)'-qbittorrent_host_lookup_libtorrent2: 'release-\(.qbittorrent)_v\(.libtorrent_2_0)'-qbittorrent_host_release_lookup: "{{ qbittorrent_host_lookup_libtorrent2- if qbittorrent_host_branch == 'libtorrent2'- else qbittorrent_host_lookup_libtorrent1 }}"-qbittorrent_host_version: |- curl -sL {{ qbittorrent_host_release_url }} | jq -r '. 
| "{{ qbittorrent_host_release_lookup }}"'+qbittorrent_role_host_download_endpoint: "{{ 'https://github.com/userdocs/qbittorrent-nox-static/releases/download/'+ if lookup('role_var', '_host_branch', role='qbittorrent') != 'legacy'+ else 'https://github.com/userdocs/qbittorrent-nox-static-legacy/releases/download/' }}"+qbittorrent_role_host_download_url: "{{ lookup('role_var', '_host_download_endpoint', role='qbittorrent') }}{{ lookup('role_var', '_host_specific_version', role='qbittorrent')+ if (lookup('role_var', '_host_specific_version', role='qbittorrent') | length > 0)+ else qbittorrent_release_version.stdout }}/x86_64-qbittorrent-nox"+qbittorrent_role_host_release_url: "{{ 'https://github.com/userdocs/qbittorrent-nox-static/releases/latest/download/dependency-version.json'+ if lookup('role_var', '_host_branch', role='qbittorrent') != 'legacy'+ else 'https://github.com/userdocs/qbittorrent-nox-static-legacy/releases/latest/download/dependency-version.json' }}"+qbittorrent_role_host_lookup_libtorrent1: 'release-\(.qbittorrent)_v\(.libtorrent_1_2)'+qbittorrent_role_host_lookup_libtorrent2: 'release-\(.qbittorrent)_v\(.libtorrent_2_0)'+qbittorrent_role_host_release_lookup: "{{ qbittorrent_role_host_lookup_libtorrent2+ if lookup('role_var', '_host_branch', role='qbittorrent') == 'libtorrent2'+ else qbittorrent_role_host_lookup_libtorrent1 }}"+qbittorrent_role_host_version: |+ curl -sL {{ lookup('role_var', '_host_release_url', role='qbittorrent') }} | jq -r '. 
| "{{ lookup('role_var', '_host_release_lookup', role='qbittorrent') }}"' -qbittorent_service_name: "saltbox_managed_{{ qbittorrent_name }}.service"+qbittorrent_role_service_name: "saltbox_managed_{{ qbittorrent_name }}.service"+qbittorrent_role_service_after: "network-online.target docker.service"+qbittorrent_role_service_requires: "network-online.target docker.service"+qbittorrent_role_service_wants: ""+qbittorrent_role_service_partof: "docker.service" ################################ # Paths ################################ -qbittorrent_paths_folder: "{{ qbittorrent_name }}"-qbittorrent_paths_location: "{{ server_appdata_path }}/{{ qbittorrent_paths_folder }}"-qbittorrent_paths_downloads_location: "{{ downloads_torrents_path }}/{{ qbittorrent_paths_folder }}"-qbittorrent_paths_conf: "{{ qbittorrent_paths_location }}/qBittorrent/qBittorrent.conf"-qbittorrent_paths_folders_list:- - "{{ qbittorrent_paths_location }}"- - "{{ qbittorrent_paths_downloads_location }}"- - "{{ qbittorrent_paths_downloads_location }}/completed"- - "{{ qbittorrent_paths_downloads_location }}/incoming"- - "{{ qbittorrent_paths_downloads_location }}/watched"- - "{{ qbittorrent_paths_downloads_location }}/torrents"+qbittorrent_role_paths_folder: "{{ qbittorrent_name }}"+qbittorrent_role_paths_location: "{{ server_appdata_path }}/{{ qbittorrent_role_paths_folder }}"+qbittorrent_role_paths_downloads_location: "{{ downloads_torrents_path }}/{{ qbittorrent_role_paths_folder }}"+qbittorrent_role_paths_conf: "{{ qbittorrent_role_paths_location }}/qBittorrent/qBittorrent.conf"+qbittorrent_role_paths_folders_list:+ - "{{ qbittorrent_role_paths_location }}"+ - "{{ qbittorrent_role_paths_downloads_location }}"+ - "{{ qbittorrent_role_paths_downloads_location }}/completed"+ - "{{ qbittorrent_role_paths_downloads_location }}/incoming"+ - "{{ qbittorrent_role_paths_downloads_location }}/watched"+ - "{{ qbittorrent_role_paths_downloads_location }}/torrents" ################################ # Web 
################################ -qbittorrent_web_subdomain: "{{ qbittorrent_name }}"-qbittorrent_web_domain: "{{ user.domain }}"-qbittorrent_web_port: "8080"-qbittorrent_web_url: "{{ 'https://' + (lookup('vars', qbittorrent_name + '_web_subdomain', default=qbittorrent_web_subdomain) + '.' + lookup('vars', qbittorrent_name + '_web_domain', default=qbittorrent_web_domain)- if (lookup('vars', qbittorrent_name + '_web_subdomain', default=qbittorrent_web_subdomain) | length > 0)- else lookup('vars', qbittorrent_name + '_web_domain', default=qbittorrent_web_domain)) }}"+qbittorrent_role_web_subdomain: "{{ qbittorrent_name }}"+qbittorrent_role_web_domain: "{{ user.domain }}"+qbittorrent_role_web_port: "8080"+qbittorrent_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='qbittorrent') + '.' + lookup('role_var', '_web_domain', role='qbittorrent')+ if (lookup('role_var', '_web_subdomain', role='qbittorrent') | length > 0)+ else lookup('role_var', '_web_domain', role='qbittorrent')) }}" ################################ # DNS ################################ -qbittorrent_dns_record: "{{ lookup('vars', qbittorrent_name + '_web_subdomain', default=qbittorrent_web_subdomain) }}"-qbittorrent_dns_zone: "{{ lookup('vars', qbittorrent_name + '_web_domain', default=qbittorrent_web_domain) }}"-qbittorrent_dns_proxy: "{{ dns.proxied }}"+qbittorrent_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='qbittorrent') }}"+qbittorrent_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='qbittorrent') }}"+qbittorrent_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -qbittorrent_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-qbittorrent_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', qbittorrent_name + '_name', default=qbittorrent_name)- if (qbittorrent_themepark_enabled and global_themepark_plugin_enabled)- else '') 
}}"-qbittorrent_traefik_middleware_custom: ""-qbittorrent_traefik_certresolver: "{{ traefik_default_certresolver }}"-qbittorrent_traefik_enabled: true-qbittorrent_traefik_api_enabled: true-qbittorrent_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/command`) || PathPrefix(`/query`) || PathPrefix(`/login`) || PathPrefix(`/sync`)"+qbittorrent_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+qbittorrent_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + qbittorrent_name+ if (lookup('role_var', '_themepark_enabled', role='qbittorrent') and global_themepark_plugin_enabled)+ else '') }}"+qbittorrent_role_traefik_middleware_custom: ""+qbittorrent_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+qbittorrent_role_traefik_enabled: true+qbittorrent_role_traefik_api_enabled: true+qbittorrent_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/command`) || PathPrefix(`/query`) || PathPrefix(`/login`) || PathPrefix(`/sync`)" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-qbittorrent_themepark_enabled: false-qbittorrent_themepark_app: "qbittorrent"-qbittorrent_themepark_theme: "{{ global_themepark_theme }}"-qbittorrent_themepark_domain: "{{ global_themepark_domain }}"-qbittorrent_themepark_addons: []-qbittorrent_themepark_headers: "\"content-security-policy: default-src 'self'; style-src 'self' 'unsafe-inline' theme-park.dev raw.githubusercontent.com use.fontawesome.com; img-src 'self' theme-park.dev raw.githubusercontent.com data:; script-src 'self' 'unsafe-inline'; object-src 'none'; form-action 'self'; frame-ancestors 'self'; font-src use.fontawesome.com;\""+qbittorrent_role_themepark_enabled: false+qbittorrent_role_themepark_app: "qbittorrent"+qbittorrent_role_themepark_theme: "{{ global_themepark_theme }}"+qbittorrent_role_themepark_domain: "{{ global_themepark_domain 
}}"+qbittorrent_role_themepark_addons: []+qbittorrent_role_themepark_headers: "\"content-security-policy: default-src 'self'; style-src 'self' 'unsafe-inline' theme-park.dev raw.githubusercontent.com use.fontawesome.com; img-src 'self' theme-park.dev raw.githubusercontent.com data:; script-src 'self' 'unsafe-inline'; object-src 'none'; form-action 'self'; frame-ancestors 'self'; font-src use.fontawesome.com;\"" ################################ # Docker ################################ # Container-qbittorrent_docker_container: "{{ qbittorrent_name }}"+qbittorrent_role_docker_container: "{{ qbittorrent_name }}" # Image-qbittorrent_docker_image_pull: true-qbittorrent_docker_image_repo: "saltydk/qbittorrent"-qbittorrent_docker_image_tag: "latest"-qbittorrent_docker_image: "{{ lookup('vars', qbittorrent_name + '_docker_image_repo', default=qbittorrent_docker_image_repo)- + ':' + lookup('vars', qbittorrent_name + '_docker_image_tag', default=qbittorrent_docker_image_tag) }}"+qbittorrent_role_docker_image_pull: true+qbittorrent_role_docker_image_repo: "saltydk/qbittorrent"+qbittorrent_role_docker_image_tag: "latest"+qbittorrent_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='qbittorrent') }}:{{ lookup('role_var', '_docker_image_tag', role='qbittorrent') }}" # Ports-qbittorrent_docker_ports_56881: "{{ port_lookup_56881.meta.port- if (port_lookup_56881.meta.port is defined) and (port_lookup_56881.meta.port | trim | length > 0)- else '56881' }}"-qbittorrent_docker_ports_8080: "{{ port_lookup_8080.meta.port- if (port_lookup_8080.meta.port is defined) and (port_lookup_8080.meta.port | trim | length > 0)- else '8090' }}"-qbittorrent_web_port_lookup: "{{ lookup('vars', qbittorrent_name + '_web_port', default=qbittorrent_web_port) }}"+qbittorrent_role_docker_ports_56881: "{{ port_lookup_56881.meta.port+ if (port_lookup_56881.meta.port is defined) and (port_lookup_56881.meta.port | trim | length > 0)+ else '56881' }}"+qbittorrent_role_docker_ports_8080: "{{ 
port_lookup_8080.meta.port+ if (port_lookup_8080.meta.port is defined) and (port_lookup_8080.meta.port | trim | length > 0)+ else '8090' }}"+qbittorrent_role_web_port_lookup: "{{ lookup('role_var', '_web_port', role='qbittorrent') }}" -qbittorrent_docker_ports_defaults:- - "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}:{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}"- - "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}:{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}/udp"-qbittorrent_docker_ports_custom: []-qbittorrent_docker_ports: "{{ lookup('vars', qbittorrent_name + '_docker_ports_defaults', default=qbittorrent_docker_ports_defaults)- + lookup('vars', qbittorrent_name + '_docker_ports_custom', default=qbittorrent_docker_ports_custom) }}"+qbittorrent_role_docker_ports_default:+ - "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}:{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}"+ - "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}:{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}/udp"+qbittorrent_role_docker_ports_custom: []+qbittorrent_role_docker_ports: "{{ lookup('role_var', '_docker_ports_default', role='qbittorrent')+ + lookup('role_var', '_docker_ports_custom', role='qbittorrent') }}" # Envs-qbittorrent_docker_envs_default:+qbittorrent_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}" UMASK_SET: "002"-qbittorrent_docker_envs_custom: {}-qbittorrent_docker_envs: "{{ lookup('vars', qbittorrent_name + '_docker_envs_default', default=qbittorrent_docker_envs_default)- | combine(lookup('vars', qbittorrent_name + '_docker_envs_custom', default=qbittorrent_docker_envs_custom)) }}"--# Commands-qbittorrent_docker_commands_default: 
[]-qbittorrent_docker_commands_custom: []-qbittorrent_docker_commands: "{{ lookup('vars', qbittorrent_name + '_docker_commands_default', default=qbittorrent_docker_commands_default)- + lookup('vars', qbittorrent_name + '_docker_commands_custom', default=qbittorrent_docker_commands_custom) }}"+ S6_SERVICES_GRACETIME: "{{ (lookup('role_var', '_docker_stop_timeout', role='qbittorrent') | int * 1000) | string }}"+qbittorrent_role_docker_envs_custom: {}+qbittorrent_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='qbittorrent')+ | combine(lookup('role_var', '_docker_envs_custom', role='qbittorrent')) }}" # Volumes-qbittorrent_docker_volumes_default:- - "{{ qbittorrent_paths_location }}:/config"+qbittorrent_role_docker_volumes_default:+ - "{{ qbittorrent_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-qbittorrent_docker_volumes_custom: []-qbittorrent_docker_volumes: "{{ lookup('vars', qbittorrent_name + '_docker_volumes_default', default=qbittorrent_docker_volumes_default)- + lookup('vars', qbittorrent_name + '_docker_volumes_custom', default=qbittorrent_docker_volumes_custom) }}"--# Devices-qbittorrent_docker_devices_default: []-qbittorrent_docker_devices_custom: []-qbittorrent_docker_devices: "{{ lookup('vars', qbittorrent_name + '_docker_devices_default', default=qbittorrent_docker_devices_default)- + lookup('vars', qbittorrent_name + '_docker_devices_custom', default=qbittorrent_docker_devices_custom) }}"--# Hosts-qbittorrent_docker_hosts_default: {}-qbittorrent_docker_hosts_custom: {}-qbittorrent_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', qbittorrent_name + '_docker_hosts_default', default=qbittorrent_docker_hosts_default))- | combine(lookup('vars', qbittorrent_name + '_docker_hosts_custom', default=qbittorrent_docker_hosts_custom)) }}"+qbittorrent_role_docker_volumes_custom: []+qbittorrent_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='qbittorrent')+ + 
lookup('role_var', '_docker_volumes_custom', role='qbittorrent') }}" # Labels-qbittorrent_docker_labels_default: {}-qbittorrent_docker_labels_custom: {}-qbittorrent_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', qbittorrent_name + '_docker_labels_default', default=qbittorrent_docker_labels_default))- | combine((traefik_themepark_labels- if (qbittorrent_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', qbittorrent_name + '_docker_labels_custom', default=qbittorrent_docker_labels_custom)) }}"+qbittorrent_role_docker_labels_default: {}+qbittorrent_role_docker_labels_custom: {}+qbittorrent_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='qbittorrent')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='qbittorrent') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='qbittorrent')) }}" # Hostname-qbittorrent_docker_hostname: "{{ qbittorrent_name }}"--# Network Mode-qbittorrent_docker_network_mode_default: "{{ docker_networks_name_common }}"-qbittorrent_docker_network_mode: "{{ lookup('vars', qbittorrent_name + '_docker_network_mode_default', default=qbittorrent_docker_network_mode_default) }}"+qbittorrent_role_docker_hostname: "{{ qbittorrent_name }}" # Networks-qbittorrent_docker_networks_alias: "{{ qbittorrent_name }}"-qbittorrent_docker_networks_default: []-qbittorrent_docker_networks_custom: []-qbittorrent_docker_networks: "{{ docker_networks_common- + lookup('vars', qbittorrent_name + '_docker_networks_default', default=qbittorrent_docker_networks_default)- + lookup('vars', qbittorrent_name + '_docker_networks_custom', default=qbittorrent_docker_networks_custom) }}"--# Capabilities-qbittorrent_docker_capabilities_default: []-qbittorrent_docker_capabilities_custom: []-qbittorrent_docker_capabilities: "{{ lookup('vars', qbittorrent_name + '_docker_capabilities_default', 
default=qbittorrent_docker_capabilities_default)- + lookup('vars', qbittorrent_name + '_docker_capabilities_custom', default=qbittorrent_docker_capabilities_custom) }}"--# Security Opts-qbittorrent_docker_security_opts_default: []-qbittorrent_docker_security_opts_custom: []-qbittorrent_docker_security_opts: "{{ lookup('vars', qbittorrent_name + '_docker_security_opts_default', default=qbittorrent_docker_security_opts_default)- + lookup('vars', qbittorrent_name + '_docker_security_opts_custom', default=qbittorrent_docker_security_opts_custom) }}"+qbittorrent_role_docker_networks_alias: "{{ qbittorrent_name }}"+qbittorrent_role_docker_networks_default: []+qbittorrent_role_docker_networks_custom: []+qbittorrent_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='qbittorrent')+ + lookup('role_var', '_docker_networks_custom', role='qbittorrent') }}" # Restart Policy-qbittorrent_docker_restart_policy: unless-stopped+qbittorrent_role_docker_restart_policy: unless-stopped # Stop Timeout-qbittorrent_docker_stop_timeout: 900+qbittorrent_role_docker_stop_timeout: 900 # State-qbittorrent_docker_state: started+qbittorrent_role_docker_state: started
modified
roles/qbittorrent/tasks/main2.yml
@@ -15,8 +15,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -30,43 +31,42 @@ ansible.builtin.include_tasks: subtasks/legacy.yml when: qbittorrent_legacy_service.stat.exists -- name: Check if '{{ qbittorent_service_name }}' exists+- name: Check if '{{ qbittorrent_role_service_name }}' exists ansible.builtin.stat:- path: "/etc/systemd/system/{{ qbittorent_service_name }}"+ path: "/etc/systemd/system/{{ qbittorrent_role_service_name }}" register: qbittorrent_service -- name: Stop '{{ qbittorent_service_name }}'+- name: Stop '{{ qbittorrent_role_service_name }}' ansible.builtin.systemd_service:- name: "{{ qbittorent_service_name }}"+ name: "{{ qbittorrent_role_service_name }}" state: stopped when: qbittorrent_service.stat.exists -- name: Remove '{{ qbittorent_service_name }}' file+- name: Remove '{{ qbittorrent_role_service_name }}' file ansible.builtin.file:- path: "/etc/systemd/system/{{ qbittorent_service_name }}"+ path: "/etc/systemd/system/{{ qbittorrent_role_service_name }}" state: absent- when: qbittorrent_service.stat.exists and not qbittorrent_host_install+ when: qbittorrent_service.stat.exists and (not lookup('role_var', '_host_install', role='qbittorrent')) - name: Remove '{{ qbittorrent_name }}-nox' file ansible.builtin.file: path: "/usr/bin/{{ qbittorrent_name }}-nox" state: absent- when: qbittorrent_service.stat.exists and not qbittorrent_host_install+ when: qbittorrent_service.stat.exists and (not lookup('role_var', '_host_install', role='qbittorrent')) - name: Create directories 
ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Check if config file exists ansible.builtin.stat:- path: "{{ qbittorrent_paths_conf }}"- register: qbittorrent_paths_conf_stat+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}"+ register: qbittorrent_role_paths_conf_stat - name: Pre-Install Tasks- ansible.builtin.import_tasks: "subtasks/pre-install/main.yml"- when: (not continuous_integration)+ ansible.builtin.include_tasks: "subtasks/pre-install/main.yml" - name: Host Install- when: qbittorrent_host_install+ when: lookup('role_var', '_host_install', role='qbittorrent') block: - name: Add '{{ qbittorrent_name }}' to hosts ansible.builtin.blockinfile:@@ -76,12 +76,12 @@ 172.19.0.1 {{ qbittorrent_name }} - name: Get Download Url- ansible.builtin.shell: "{{ qbittorrent_host_version }}"- register: qbittorret_release_version+ ansible.builtin.shell: "{{ lookup('role_var', '_host_version', role='qbittorrent') }}"+ register: qbittorrent_release_version - name: Download 'qbittorrent-nox' ansible.builtin.get_url:- url: "{{ qbittorrent_host_download_url }}"+ url: "{{ lookup('role_var', '_host_download_url', role='qbittorrent') }}" dest: "/usr/bin/{{ qbittorrent_name }}-nox" mode: "0755" force: true@@ -89,25 +89,25 @@ - name: Import '{{ qbittorrent_name }}.service' ansible.builtin.template: src: "qbittorrent.service.j2"- dest: "/etc/systemd/system/{{ qbittorent_service_name }}"+ dest: "/etc/systemd/system/{{ qbittorrent_role_service_name }}" mode: "0644" force: true - - name: Systemd daemon-reload '{{ qbittorent_service_name }}'+ - name: Systemd daemon-reload '{{ qbittorrent_role_service_name }}' ansible.builtin.systemd_service:- name: "{{ qbittorent_service_name }}"+ name: "{{ qbittorrent_role_service_name }}" state: stopped enabled: false daemon_reload: true - - name: Start '{{ qbittorent_service_name }}'+ - name: Start '{{ qbittorrent_role_service_name }}' ansible.builtin.systemd_service:- name: "{{ 
qbittorent_service_name }}"+ name: "{{ qbittorrent_role_service_name }}" state: started enabled: true - name: Docker Install- when: not qbittorrent_host_install+ when: not lookup('role_var', '_host_install', role='qbittorrent') block: - name: Remove '{{ qbittorrent_name }}' from hosts ansible.builtin.blockinfile:@@ -119,5 +119,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"- when: (not continuous_integration) and (not qbittorrent_paths_conf_stat.stat.exists)+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml"+ when: (not qbittorrent_role_paths_conf_stat.stat.exists)
modified
roles/qbittorrent/tasks/subtasks/post-install/main.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Wait for config to be created ansible.builtin.wait_for:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" state: present - name: Post-Install | Wait for 30 seconds@@ -18,25 +18,25 @@ - name: Post-Install | Stop container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml"- when: not qbittorrent_host_install+ when: not lookup('role_var', '_host_install', role='qbittorrent') -- name: Post-Install | Stop '{{ qbittorent_service_name }}'+- name: Post-Install | Stop '{{ lookup('role_var', '_service_name', role='qbittorrent') }}' ansible.builtin.systemd_service:- name: "{{ qbittorent_service_name }}"+ name: "{{ lookup('role_var', '_service_name', role='qbittorrent') }}" state: stopped enabled: true- when: qbittorrent_host_install+ when: lookup('role_var', '_host_install', role='qbittorrent') - name: Post-Install | Settings Task- ansible.builtin.import_tasks: "settings/main.yml"+ ansible.builtin.include_tasks: "settings/main.yml" - name: Post-Install | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"- when: not qbittorrent_host_install+ when: not lookup('role_var', '_host_install', role='qbittorrent') -- name: Post-Install | Start '{{ qbittorent_service_name }}'+- name: Post-Install | Start '{{ lookup('role_var', '_service_name', role='qbittorrent') }}' ansible.builtin.systemd_service:- name: "{{ qbittorent_service_name }}"+ name: "{{ lookup('role_var', '_service_name', role='qbittorrent') }}" state: started enabled: true- when: qbittorrent_host_install+ when: lookup('role_var', '_host_install', role='qbittorrent')
modified
roles/qbittorrent/tasks/subtasks/post-install/settings/main.yml
@@ -9,10 +9,10 @@ --- - name: Post-Install | Settings | Update Session\Port in 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: BitTorrent option: Session\Port- value: "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}"+ value: "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -21,10 +21,10 @@ - name: Post-Install | Settings | Update Connection\PortRangeMin in 'qBittorrent.conf' config settings (legacy) community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: Connection\PortRangeMin- value: "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}"+ value: "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -33,7 +33,7 @@ - name: Post-Install | Settings | Update WebUI\HostHeaderValidation 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\HostHeaderValidation value: "false"@@ -45,7 +45,7 @@ - name: Post-Install | Settings | Update WebUI\CSRFProtection 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\CSRFProtection value: "false"@@ -57,10 +57,10 @@ - name: Post-Install | Settings | Update WebUI\CustomHTTPHeaders 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', 
role='qbittorrent') }}" section: Preferences option: WebUI\CustomHTTPHeaders- value: "{{ qbittorrent_webui_custom_headers }}"+ value: "{{ lookup('role_var', '_webui_custom_headers', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -69,10 +69,10 @@ - name: Post-Install | Settings | Update WebUI\CustomHTTPHeadersEnabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\CustomHTTPHeadersEnabled- value: "{{ qbittorrent_webui_custom_headers_enabled }}"+ value: "{{ lookup('role_var', '_webui_custom_headers_enabled', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -81,7 +81,7 @@ - name: Post-Install | Settings | Update LegalNotice.Accepted 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: LegalNotice option: Accepted value: "true"@@ -93,10 +93,10 @@ - name: Post-Install | Settings | Update WebUI\Port 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\Port- value: "{{ lookup('vars', qbittorrent_name + '_docker_ports_8080', default=qbittorrent_docker_ports_8080) if qbittorrent_host_install else '8080' }}"+ value: "{{ lookup('role_var', '_docker_ports_8080', role='qbittorrent') if lookup('role_var', '_host_install', role='qbittorrent') else '8080' }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -105,10 +105,10 @@ - name: Post-Install | Settings | Update WebUI\Port 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: 
Preferences option: WebUI\Address- value: "{{ '172.19.0.1' if qbittorrent_host_install else '*' }}"+ value: "{{ '172.19.0.1' if lookup('role_var', '_host_install', role='qbittorrent') else '*' }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -117,7 +117,7 @@ - name: Post-Install | Settings | Update WebUI\TrustedReverseProxiesList 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\TrustedReverseProxiesList value: "172.19.0.0/16"@@ -129,7 +129,7 @@ - name: Post-Install | Settings | Update WebUI\ReverseProxySupportEnabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\ReverseProxySupportEnabled value: "true"@@ -141,7 +141,7 @@ - name: Post-Install | Settings | Update FileLogger\Enabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Application option: FileLogger\Enabled value: "true"@@ -153,10 +153,10 @@ - name: Post-Install | Settings | Update FileLogger\Path 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Application option: FileLogger\Path- value: "{{ qbittorrent_paths_location + '/log' if qbittorrent_host_install else '/config/log' }}"+ value: "{{ lookup('role_var', '_paths_location', role='qbittorrent') + '/log' if lookup('role_var', '_host_install', role='qbittorrent') else '/config/log' }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -164,19 +164,20 @@ mode: "0664" - name: Post-Install | Settings | Generate Password Hash- when: not 
qbittorrent_paths_conf_stat.stat.exists+ when: not qbittorrent_role_paths_conf_stat.stat.exists block: - name: Post-Install | Settings | Generate Password Hash qbittorrent_passwd: password: "{{ user.pass }}" register: qbittorrent_hashed_passwd+ no_log: true - name: Post-Install | Settings | Set qBittorrent 'WebUI\Username' community.general.ini_file:+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\Username value: '{{ user.name }}'- path: "{{ qbittorrent_paths_conf }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -185,10 +186,10 @@ - name: Post-Install | Settings | Set qBittorrent 'WebUI\Password_PBKDF2' community.general.ini_file:+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\Password_PBKDF2- value: '{{ qbittorrent_hashed_passwd.msg }}'- path: "{{ qbittorrent_paths_conf }}"+ value: '{{ qbittorrent_hashed_passwd.hash }}' no_extra_spaces: true state: present owner: "{{ user.name }}"
modified
roles/qbittorrent/tasks/subtasks/pre-install/main.yml
@@ -9,7 +9,7 @@ --- - name: Pre-Install | Remove 'qbittorrent.yml' file ansible.builtin.file:- path: "/opt/traefik/{{ qbittorrent_name }}.yml"+ path: "{{ server_appdata_path }}/traefik/{{ qbittorrent_name }}.yml" state: absent - name: Pre-Install | Initialize or update port range low bound@@ -26,7 +26,7 @@ - name: Pre-Install | Update port range low bound for next iteration ansible.builtin.set_fact:- port_range_low_bound: "{{ (lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) | int) + 1 }}"+ port_range_low_bound: "{{ (lookup('role_var', '_docker_ports_56881', role='qbittorrent') | int) + 1 }}" - name: Pre-Install | Get next available port within the range of '8090-8100' # noqa fqcn[action] find_open_port:@@ -37,14 +37,14 @@ ignore_errors: true - name: Pre-Install | Settings | Update 'qBittorrent.conf' config settings- when: qbittorrent_paths_conf_stat.stat.exists+ when: qbittorrent_role_paths_conf_stat.stat.exists block: - name: Pre-Install | Settings | Update Session\Port in 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: BitTorrent option: Session\Port- value: "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}"+ value: "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -53,10 +53,10 @@ - name: Pre-Install | Settings | Update Session\TorrentContentRemoveOption in 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: BitTorrent option: Session\TorrentContentRemoveOption- value: "{{ lookup('vars', qbittorrent_name + '_torrent_content_remove_option', default=qbittorrent_torrent_content_remove_option) }}"+ value: "{{ lookup('role_var', 
'_torrent_content_remove_option', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -65,10 +65,10 @@ - name: Pre-Install | Settings | Update Connection\PortRangeMin in 'qBittorrent.conf' config settings (legacy) community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: Connection\PortRangeMin- value: "{{ lookup('vars', qbittorrent_name + '_docker_ports_56881', default=qbittorrent_docker_ports_56881) }}"+ value: "{{ lookup('role_var', '_docker_ports_56881', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -77,7 +77,7 @@ - name: Pre-Install | Settings | Update WebUI\HostHeaderValidation 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\HostHeaderValidation value: "false"@@ -89,7 +89,7 @@ - name: Pre-Install | Settings | Update WebUI\CSRFProtection 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\CSRFProtection value: "false"@@ -101,10 +101,10 @@ - name: Pre-Install | Settings | Update WebUI\CustomHTTPHeaders 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\CustomHTTPHeaders- value: "{{ qbittorrent_webui_custom_headers }}"+ value: "{{ lookup('role_var', '_webui_custom_headers', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -113,10 +113,10 @@ - name: Pre-Install | Settings | Update WebUI\CustomHTTPHeadersEnabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ 
qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\CustomHTTPHeadersEnabled- value: "{{ qbittorrent_webui_custom_headers_enabled }}"+ value: "{{ lookup('role_var', '_webui_custom_headers_enabled', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -125,7 +125,7 @@ - name: Pre-Install | Settings | Update LegalNotice.Accepted 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: LegalNotice option: Accepted value: "true"@@ -137,10 +137,10 @@ - name: Pre-Install | Settings | Update WebUI\Port 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\Port- value: "{{ qbittorrent_docker_ports_8080 if qbittorrent_host_install else lookup('vars', qbittorrent_name + '_web_port', default=qbittorrent_web_port) }}"+ value: "{{ lookup('role_var', '_docker_ports_8080', role='qbittorrent') if lookup('role_var', '_host_install', role='qbittorrent') else lookup('role_var', '_web_port', role='qbittorrent') }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -149,10 +149,10 @@ - name: Pre-Install | Settings | Update WebUI\Port 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\Address- value: "{{ '172.19.0.1' if qbittorrent_host_install else '*' }}"+ value: "{{ '172.19.0.1' if lookup('role_var', '_host_install', role='qbittorrent') else '*' }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -161,7 +161,7 @@ - name: Pre-Install | Settings | Update WebUI\TrustedReverseProxiesList 'qBittorrent.conf' config settings 
community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\TrustedReverseProxiesList value: "172.19.0.0/16"@@ -173,7 +173,7 @@ - name: Pre-Install | Settings | Update WebUI\ReverseProxySupportEnabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Preferences option: WebUI\ReverseProxySupportEnabled value: "true"@@ -185,7 +185,7 @@ - name: Pre-Install | Settings | Update FileLogger\Enabled 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Application option: FileLogger\Enabled value: "true"@@ -197,10 +197,10 @@ - name: Pre-Install | Settings | Update FileLogger\Path 'qBittorrent.conf' config settings community.general.ini_file:- path: "{{ qbittorrent_paths_conf }}"+ path: "{{ lookup('role_var', '_paths_conf', role='qbittorrent') }}" section: Application option: FileLogger\Path- value: "{{ qbittorrent_paths_location + '/log' if qbittorrent_host_install else '/config/log' }}"+ value: "{{ lookup('role_var', '_paths_location', role='qbittorrent') + '/log' if lookup('role_var', '_host_install', role='qbittorrent') else '/config/log' }}" no_extra_spaces: true state: present owner: "{{ user.name }}"@@ -210,9 +210,9 @@ - name: Pre-Install | Import 'qbittorrent.yml' file ansible.builtin.template: src: "qbittorrent.yml.j2"- dest: "/opt/traefik/{{ qbittorrent_name }}.yml"+ dest: "{{ server_appdata_path }}/traefik/{{ qbittorrent_name }}.yml" owner: '{{ user.name }}' group: '{{ user.name }}' mode: "0644" force: true- when: qbittorrent_host_install+ when: lookup('role_var', '_host_install', role='qbittorrent')
modified
roles/qbittorrent/templates/qbittorrent.service.j2
@@ -10,8 +10,22 @@ [Unit] Description={{ qbittorrent_name }}-After=network-online.target docker.service-Requires=network-online.target docker.service+{% set service_after = lookup('role_var', '_service_after', role='qbittorrent') %}+{% if service_after | length > 0 %}+After={{ service_after }}+{% endif %}+{% set service_requires = lookup('role_var', '_service_requires', role='qbittorrent') %}+{% if service_requires | length > 0 %}+Requires={{ service_requires }}+{% endif %}+{% set service_wants = lookup('role_var', '_service_wants', role='qbittorrent') %}+{% if service_wants | length > 0 %}+Wants={{ service_wants }}+{% endif %}+{% set service_partof = lookup('role_var', '_service_partof', role='qbittorrent') %}+{% if service_partof | length > 0 %}+PartOf={{ service_partof }}+{% endif %} StartLimitIntervalSec=500s StartLimitBurst=5 @@ -19,9 +33,9 @@ User={{ user.name }} Group={{ user.name }} Type=exec-Environment="XDG_CONFIG_HOME={{ qbittorrent_paths_location }}" "XDG_DATA_HOME={{ qbittorrent_paths_location }}" "HOME={{ qbittorrent_paths_location }}"+Environment="XDG_CONFIG_HOME={{ lookup('role_var', '_paths_location', role='qbittorrent') }}" "XDG_DATA_HOME={{ lookup('role_var', '_paths_location', role='qbittorrent') }}" "HOME={{ lookup('role_var', '_paths_location', role='qbittorrent') }}" ExecStartPre=/bin/sleep 10-ExecStart=/usr/bin/{{ qbittorrent_name }}-nox --webui-port={{ qbittorrent_docker_ports_8080 }}+ExecStart=/usr/bin/{{ qbittorrent_name }}-nox --webui-port={{ lookup('role_var', '_docker_ports_8080', role='qbittorrent') }} AmbientCapabilities=CAP_NET_RAW TimeoutStopSec=1800 Restart=on-failure
modified
roles/qbittorrent/templates/qbittorrent.yml.j2
@@ -17,13 +17,13 @@ service: "{{ qbittorrent_name }}" tls: options: securetls@file- certResolver: {{ qbittorrent_traefik_certresolver }}-{% if lookup('vars', qbittorrent_name + '_traefik_api_enabled', default=qbittorrent_traefik_api_enabled) %}+ certResolver: {{ lookup('role_var', '_traefik_certresolver', role='qbittorrent') }}+{% if lookup('role_var', '_traefik_api_enabled', role='qbittorrent') %} {{ qbittorrent_name }}-api-http: entryPoints: - "web"- rule: "{{ traefik_host_template + ' && (' + lookup('vars', qbittorrent_name + '_traefik_api_endpoint', default=qbittorrent_traefik_api_endpoint) }})"+ rule: "{{ traefik_host_template + ' && (' + lookup('role_var', '_traefik_api_endpoint', role='qbittorrent') }})" middlewares: {{ traefik_default_middleware_http_api.split(',') | to_nice_yaml | trim | indent(8) }} service: "{{ qbittorrent_name }}"@@ -31,29 +31,29 @@ {{ qbittorrent_name }}-api: entryPoints: - "websecure"- rule: "{{ traefik_host_template + ' && (' + lookup('vars', qbittorrent_name + '_traefik_api_endpoint', default=qbittorrent_traefik_api_endpoint) }})"+ rule: "{{ traefik_host_template + ' && (' + lookup('role_var', '_traefik_api_endpoint', role='qbittorrent') }})" middlewares: {{ traefik_default_middleware_api.split(',') | to_nice_yaml | trim | indent(8) }} service: "{{ qbittorrent_name }}" tls: options: securetls@file- certResolver: {{ qbittorrent_traefik_certresolver }}+ certResolver: {{ lookup('role_var', '_traefik_certresolver', role='qbittorrent') }} {% endif %} services: {{ qbittorrent_name }}: loadBalancer: servers:- - url: "http://172.19.0.1:{{ qbittorrent_docker_ports_8080 }}"-{% if qbittorrent_themepark_enabled and global_themepark_plugin_enabled %}+ - url: "http://172.19.0.1:{{ lookup('role_var', '_docker_ports_8080', role='qbittorrent') }}"+{% if lookup('role_var', '_themepark_enabled', role='qbittorrent') and global_themepark_plugin_enabled %} middlewares:- themepark-{{ lookup("vars", qbittorrent_name + "_name", default=qbittorrent_name) 
}}:+ themepark-{{ qbittorrent_name }}: plugin: themepark:- app: "{{ qbittorrent_themepark_app }}"- theme: "{{ lookup("vars", qbittorrent_name + "_themepark_theme", default=qbittorrent_themepark_theme) }}"-{% if lookup("vars", qbittorrent_name + "_themepark_addons", default=qbittorrent_themepark_addons) | length > 0 %}- addons: "{{ lookup("vars", qbittorrent_name + "_themepark_addons", default=qbittorrent_themepark_addons) }}"+ app: "{{ qbittorrent_role_themepark_app }}"+ theme: "{{ lookup('role_var', '_themepark_theme', role='qbittorrent') }}"+{% if lookup('role_var', '_themepark_addons', role='qbittorrent') | length > 0 %}+ addons: "{{ lookup('role_var', '_themepark_addons', role='qbittorrent') }}" {% endif %} {% endif %}
modified
roles/radarr/defaults/main.yml
@@ -17,171 +17,121 @@ # Settings ################################ -radarr_external_auth: true+radarr_role_external_auth: true ################################ # Paths ################################ -radarr_paths_folder: "{{ radarr_name }}"-radarr_paths_location: "{{ server_appdata_path }}/{{ radarr_paths_folder }}"-radarr_paths_folders_list:- - "{{ radarr_paths_location }}"-radarr_paths_config_location: "{{ radarr_paths_location }}/config.xml"+radarr_role_paths_folder: "{{ radarr_name }}"+radarr_role_paths_location: "{{ server_appdata_path }}/{{ radarr_role_paths_folder }}"+radarr_role_paths_folders_list:+ - "{{ radarr_role_paths_location }}"+radarr_role_paths_config_location: "{{ radarr_role_paths_location }}/config.xml" ################################ # Web ################################ -radarr_web_subdomain: "{{ radarr_name }}"-radarr_web_domain: "{{ user.domain }}"-radarr_web_port: "7878"-radarr_web_url: "{{ 'https://' + (lookup('vars', radarr_name + '_web_subdomain', default=radarr_web_subdomain) + '.' + lookup('vars', radarr_name + '_web_domain', default=radarr_web_domain)- if (lookup('vars', radarr_name + '_web_subdomain', default=radarr_web_subdomain) | length > 0)- else lookup('vars', radarr_name + '_web_domain', default=radarr_web_domain)) }}"+radarr_role_web_subdomain: "{{ radarr_name }}"+radarr_role_web_domain: "{{ user.domain }}"+radarr_role_web_port: "7878"+radarr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='radarr') + '.' 
+ lookup('role_var', '_web_domain', role='radarr')+ if (lookup('role_var', '_web_subdomain', role='radarr') | length > 0)+ else lookup('role_var', '_web_domain', role='radarr')) }}" ################################ # DNS ################################ -radarr_dns_record: "{{ lookup('vars', radarr_name + '_web_subdomain', default=radarr_web_subdomain) }}"-radarr_dns_zone: "{{ lookup('vars', radarr_name + '_web_domain', default=radarr_web_domain) }}"-radarr_dns_proxy: "{{ dns.proxied }}"+radarr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='radarr') }}"+radarr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='radarr') }}"+radarr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -radarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-radarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', radarr_name + '_name', default=radarr_name)- if (radarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-radarr_traefik_middleware_custom: ""-radarr_traefik_certresolver: "{{ traefik_default_certresolver }}"-radarr_traefik_enabled: true-radarr_traefik_api_enabled: true-radarr_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)"+radarr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+radarr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + radarr_name+ if (lookup('role_var', '_themepark_enabled', role='radarr') and global_themepark_plugin_enabled)+ else '') }}"+radarr_role_traefik_middleware_custom: ""+radarr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+radarr_role_traefik_enabled: true+radarr_role_traefik_api_enabled: true+radarr_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)" ################################-# API-################################--# default to 
blank-radarr_api_key:--################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-radarr_themepark_enabled: false-radarr_themepark_app: "radarr"-radarr_themepark_theme: "{{ global_themepark_theme }}"-radarr_themepark_domain: "{{ global_themepark_domain }}"-radarr_themepark_addons: []+radarr_role_themepark_enabled: false+radarr_role_themepark_app: "radarr"+radarr_role_themepark_theme: "{{ global_themepark_theme }}"+radarr_role_themepark_domain: "{{ global_themepark_domain }}"+radarr_role_themepark_addons: [] ################################ # Docker ################################ # Container-radarr_docker_container: "{{ radarr_name }}"+radarr_role_docker_container: "{{ radarr_name }}" # Image-radarr_docker_image_pull: true-radarr_docker_image_repo: "ghcr.io/hotio/radarr"-radarr_docker_image_tag: "release"-radarr_docker_image: "{{ lookup('vars', radarr_name + '_docker_image_repo', default=radarr_docker_image_repo)- + ':' + lookup('vars', radarr_name + '_docker_image_tag', default=radarr_docker_image_tag) }}"--# Ports-radarr_docker_ports_defaults: []-radarr_docker_ports_custom: []-radarr_docker_ports: "{{ lookup('vars', radarr_name + '_docker_ports_defaults', default=radarr_docker_ports_defaults)- + lookup('vars', radarr_name + '_docker_ports_custom', default=radarr_docker_ports_custom) }}"+radarr_role_docker_image_pull: true+radarr_role_docker_image_repo: "ghcr.io/hotio/radarr"+radarr_role_docker_image_tag: "release"+radarr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='radarr') }}:{{ lookup('role_var', '_docker_image_tag', role='radarr') }}" # Envs-radarr_docker_envs_default:+radarr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-radarr_docker_envs_custom: {}-radarr_docker_envs: "{{ lookup('vars', radarr_name + '_docker_envs_default', default=radarr_docker_envs_default)- | combine(lookup('vars', radarr_name + 
'_docker_envs_custom', default=radarr_docker_envs_custom)) }}"--# Commands-radarr_docker_commands_default: []-radarr_docker_commands_custom: []-radarr_docker_commands: "{{ lookup('vars', radarr_name + '_docker_commands_default', default=radarr_docker_commands_default)- + lookup('vars', radarr_name + '_docker_commands_custom', default=radarr_docker_commands_custom) }}"+radarr_role_docker_envs_custom: {}+radarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='radarr')+ | combine(lookup('role_var', '_docker_envs_custom', role='radarr')) }}" # Volumes-radarr_docker_volumes_default:- - "{{ radarr_paths_location }}:/config"+radarr_role_docker_volumes_default:+ - "{{ radarr_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-radarr_docker_volumes_legacy:+radarr_role_docker_volumes_legacy: - "/mnt/unionfs/Media/Movies:/movies"-radarr_docker_volumes_custom: []-radarr_docker_volumes: "{{ lookup('vars', radarr_name + '_docker_volumes_default', default=radarr_docker_volumes_default)- + lookup('vars', radarr_name + '_docker_volumes_custom', default=radarr_docker_volumes_custom)- + (lookup('vars', radarr_name + '_docker_volumes_legacy', default=radarr_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"--# Devices-radarr_docker_devices_default: []-radarr_docker_devices_custom: []-radarr_docker_devices: "{{ lookup('vars', radarr_name + '_docker_devices_default', default=radarr_docker_devices_default)- + lookup('vars', radarr_name + '_docker_devices_custom', default=radarr_docker_devices_custom) }}"--# Hosts-radarr_docker_hosts_default: {}-radarr_docker_hosts_custom: {}-radarr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', radarr_name + '_docker_hosts_default', default=radarr_docker_hosts_default))- | combine(lookup('vars', radarr_name + '_docker_hosts_custom', default=radarr_docker_hosts_custom)) }}"+radarr_role_docker_volumes_custom: []+radarr_role_docker_volumes: "{{ lookup('role_var', 
'_docker_volumes_default', role='radarr')+ + lookup('role_var', '_docker_volumes_custom', role='radarr')+ + (lookup('role_var', '_docker_volumes_legacy', role='radarr')+ if docker_legacy_volume+ else []) }}" # Labels-radarr_docker_labels_default: {}-radarr_docker_labels_custom: {}-radarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', radarr_name + '_docker_labels_default', default=radarr_docker_labels_default))- | combine((traefik_themepark_labels- if (radarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', radarr_name + '_docker_labels_custom', default=radarr_docker_labels_custom)) }}"+radarr_role_docker_labels_default: {}+radarr_role_docker_labels_custom: {}+radarr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='radarr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='radarr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='radarr')) }}" # Hostname-radarr_docker_hostname: "{{ radarr_name }}"--# Network Mode-radarr_docker_network_mode_default: "{{ docker_networks_name_common }}"-radarr_docker_network_mode: "{{ lookup('vars', radarr_name + '_docker_network_mode_default', default=radarr_docker_network_mode_default) }}"+radarr_role_docker_hostname: "{{ radarr_name }}" # Networks-radarr_docker_networks_alias: "{{ radarr_name }}"-radarr_docker_networks_default: []-radarr_docker_networks_custom: []-radarr_docker_networks: "{{ docker_networks_common- + lookup('vars', radarr_name + '_docker_networks_default', default=radarr_docker_networks_default)- + lookup('vars', radarr_name + '_docker_networks_custom', default=radarr_docker_networks_custom) }}"--# Capabilities-radarr_docker_capabilities_default: []-radarr_docker_capabilities_custom: []-radarr_docker_capabilities: "{{ lookup('vars', radarr_name + '_docker_capabilities_default', default=radarr_docker_capabilities_default)- + lookup('vars', radarr_name 
+ '_docker_capabilities_custom', default=radarr_docker_capabilities_custom) }}"--# Security Opts-radarr_docker_security_opts_default: []-radarr_docker_security_opts_custom: []-radarr_docker_security_opts: "{{ lookup('vars', radarr_name + '_docker_security_opts_default', default=radarr_docker_security_opts_default)- + lookup('vars', radarr_name + '_docker_security_opts_custom', default=radarr_docker_security_opts_custom) }}"+radarr_role_docker_networks_alias: "{{ radarr_name }}"+radarr_role_docker_networks_default: []+radarr_role_docker_networks_custom: []+radarr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='radarr')+ + lookup('role_var', '_docker_networks_custom', role='radarr') }}" # Restart Policy-radarr_docker_restart_policy: unless-stopped+radarr_role_docker_restart_policy: unless-stopped # State-radarr_docker_state: started+radarr_role_docker_state: started
modified
roles/radarr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -24,5 +24,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (lookup('vars', radarr_name + '_traefik_sso_middleware', default=radarr_traefik_sso_middleware) | length > 0) and lookup('vars', radarr_name + '_external_auth', default=radarr_external_auth)+ ansible.builtin.include_tasks: "subtasks/auth.yml"+ when: (lookup('vars', radarr_name + '_traefik_sso_middleware', default=radarr_role_traefik_sso_middleware) | length > 0) and lookup('vars', radarr_name + '_external_auth', default=radarr_role_external_auth)
modified
roles/radarr/tasks/subtasks/auth.yml
@@ -9,7 +9,7 @@ --- - name: Auth | Wait for 'config.xml' to be created ansible.builtin.wait_for:- path: "/opt/{{ radarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ radarr_name }}/config.xml" state: present - name: Auth | Wait for 10 seconds@@ -18,7 +18,7 @@ - name: Auth | Lookup AuthenticationMethod value community.general.xml:- path: "/opt/{{ radarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ radarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" content: "text" register: xmlresp@@ -28,7 +28,7 @@ block: - name: Auth | Change the 'AuthenticationMethod' attribute to 'External' community.general.xml:- path: "/opt/{{ radarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ radarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" value: "External"
modified
roles/rclone/tasks/main.yml
@@ -8,13 +8,13 @@ ######################################################################### --- - name: Build URL Tasks- ansible.builtin.import_tasks: "subtasks/01_build_url.yml"+ ansible.builtin.include_tasks: "subtasks/01_build_url.yml" - name: Install Binary Tasks- ansible.builtin.import_tasks: "subtasks/02_install_binary.yml"+ ansible.builtin.include_tasks: "subtasks/02_install_binary.yml" - name: Import Existing Config Tasks- ansible.builtin.import_tasks: "subtasks/03_import_config.yml"+ ansible.builtin.include_tasks: "subtasks/03_import_config.yml" - name: Preinstall Tasks ansible.builtin.include_tasks: "subtasks/99_preinstall.yml"
modified
roles/rclone/tasks/subtasks/01_build_url.yml
@@ -9,7 +9,7 @@ --- - name: Build URL | Install common packages ansible.builtin.apt:- state: present+ state: latest name: - curl - jq@@ -65,7 +65,7 @@ if ((rclone.version | type_debug == 'float') or (rclone.version | type_debug == 'int')) else (rclone.version | regex_replace('(^v\\.|^v)', '')) }} - - name: Build URL | Check if version '{{ rclone_version_specified0 }}' is available+ - name: Build URL | Check if specified version is available ansible.builtin.shell: | curl -sL {{ svm }}https://api.github.com/repos/ncw/rclone/git/refs/tags \ | jq -r '.[] | .ref' | sed 's/\/\?refs\/tags\/v//g' \
modified
roles/rclone/tasks/subtasks/02_install_binary.yml
@@ -9,7 +9,7 @@ --- - name: Install Binary | Install common packages ansible.builtin.apt:- state: present+ state: latest name: - unzip - man-db
modified
roles/redis/defaults/main.yml
@@ -17,102 +17,54 @@ # Paths ################################ -redis_paths_folder: "{{ redis_name }}"-redis_paths_location: "{{ server_appdata_path }}/{{ redis_paths_folder }}"-redis_paths_folders_list:- - "{{ redis_paths_location }}"+redis_role_paths_folder: "{{ redis_name }}"+redis_role_paths_location: "{{ server_appdata_path }}/{{ redis_role_paths_folder }}"+redis_role_paths_folders_list:+ - "{{ redis_role_paths_location }}" ################################ # Docker ################################ # Container-redis_docker_container: "{{ redis_name }}"+redis_role_docker_container: "{{ redis_name }}" # Image-redis_docker_image_pull: true-redis_docker_image_tag: "alpine"-redis_docker_image_repo: "redis"-redis_docker_image: "{{ lookup('vars', redis_name + '_docker_image_repo', default=redis_docker_image_repo) }}:{{ lookup('vars', redis_name + '_docker_image_tag', default=redis_docker_image_tag) }}"--# Ports-redis_docker_ports_defaults: []-redis_docker_ports_custom: []-redis_docker_ports: "{{ lookup('vars', redis_name + '_docker_ports_defaults', default=redis_docker_ports_defaults)- + lookup('vars', redis_name + '_docker_ports_custom', default=redis_docker_ports_custom) }}"+redis_role_docker_image_pull: true+redis_role_docker_image_tag: "alpine"+redis_role_docker_image_repo: "redis"+redis_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='redis') }}:{{ lookup('role_var', '_docker_image_tag', role='redis') }}" # Envs-redis_docker_envs_default:+redis_role_docker_envs_default: TZ: "{{ tz }}"-redis_docker_envs_custom: {}-redis_docker_envs: "{{ lookup('vars', redis_name + '_', default=redis_docker_envs_default)- | combine(lookup('vars', redis_name + '_docker_envs_custom', default=redis_docker_envs_custom)) }}"--# Commands-redis_docker_commands_default: []-redis_docker_commands_custom: []-redis_docker_commands: "{{ lookup('vars', redis_name + '_docker_commands_default', default=redis_docker_commands_default)- + lookup('vars', redis_name + 
'_docker_commands_custom', default=redis_docker_commands_custom) }}"+redis_role_docker_envs_custom: {}+redis_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='redis')+ | combine(lookup('role_var', '_docker_envs_custom', role='redis')) }}" # Volumes-redis_docker_volumes_default:- - "{{ redis_paths_location }}:/data"-redis_docker_volumes_custom: []-redis_docker_volumes: "{{ lookup('vars', redis_name + '_docker_volumes_default', default=redis_docker_volumes_default)- + lookup('vars', redis_name + '_docker_volumes_custom', default=redis_docker_volumes_custom) }}"--# Devices-redis_docker_devices_default: []-redis_docker_devices_custom: []-redis_docker_devices: "{{ lookup('vars', redis_name + '_docker_devices_default', default=redis_docker_devices_default)- + lookup('vars', redis_name + '_docker_devices_custom', default=redis_docker_devices_custom) }}"--# Hosts-redis_docker_hosts_default: {}-redis_docker_hosts_custom: {}-redis_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', redis_name + '_docker_hosts_default', default=redis_docker_hosts_default))- | combine(lookup('vars', redis_name + '_docker_hosts_custom', default=redis_docker_hosts_custom)) }}"--# Labels-redis_docker_labels_default: {}-redis_docker_labels_custom: {}-redis_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', redis_name + '_docker_labels_default', default=redis_docker_labels_default))- | combine(lookup('vars', redis_name + '_docker_labels_custom', default=redis_docker_labels_custom)) }}"+redis_role_docker_volumes_default:+ - "{{ redis_role_paths_location }}:/data"+redis_role_docker_volumes_custom: []+redis_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='redis')+ + lookup('role_var', '_docker_volumes_custom', role='redis') }}" # Hostname-redis_docker_hostname: "{{ redis_name }}"--# Network Mode-redis_docker_network_mode_default: "{{ docker_networks_name_common }}"-redis_docker_network_mode: "{{ lookup('vars', redis_name + 
'_docker_network_mode_default', default=redis_docker_network_mode_default) }}"+redis_role_docker_hostname: "{{ redis_name }}" # Networks-redis_docker_networks_alias: "{{ redis_name }}"-redis_docker_networks_default: []-redis_docker_networks_custom: []-redis_docker_networks: "{{ docker_networks_common- + lookup('vars', redis_name + '_docker_networks_default', default=redis_docker_networks_default)- + lookup('vars', redis_name + '_docker_networks_custom', default=redis_docker_networks_custom) }}"--# Capabilities-redis_docker_capabilities_default: []-redis_docker_capabilities_custom: []-redis_docker_capabilities: "{{ lookup('vars', redis_name + '_docker_capabilities_default', default=redis_docker_capabilities_default)- + lookup('vars', redis_name + '_docker_capabilities_custom', default=redis_docker_capabilities_custom) }}"--# Security Opts-redis_docker_security_opts_default: []-redis_docker_security_opts_custom: []-redis_docker_security_opts: "{{ lookup('vars', redis_name + '_docker_security_opts_default', default=redis_docker_security_opts_default)- + lookup('vars', redis_name + '_docker_security_opts_custom', default=redis_docker_security_opts_custom) }}"+redis_role_docker_networks_alias: "{{ redis_name }}"+redis_role_docker_networks_default: []+redis_role_docker_networks_custom: []+redis_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='redis')+ + lookup('role_var', '_docker_networks_custom', role='redis') }}" # Restart Policy-redis_docker_restart_policy: unless-stopped+redis_role_docker_restart_policy: unless-stopped # State-redis_docker_state: started+redis_role_docker_state: started # User-redis_docker_user: "{{ uid }}:{{ gid }}"+redis_role_docker_user: "{{ uid }}:{{ gid }}"
modified
roles/redis/tasks/main2.yml
@@ -12,7 +12,7 @@ - name: Reset Redis directory ansible.builtin.file:- path: "{{ redis_paths_location }}"+ path: "{{ redis_role_paths_location }}" state: absent when: ('authelia-reset' in ansible_run_tags)
modified
roles/remote/tasks/cleanup.yml
@@ -17,34 +17,34 @@ - name: "Cleanup | Fail if 'rclone_service_name' is empty" ansible.builtin.fail: msg: "rclone_service_name is empty"- when: rclone_service_name | length == 0+ when: (rclone_service_name | length == 0) - name: "Cleanup | Stop and disable existing '{{ rclone_service_name }}.service'" ansible.builtin.systemd_service: name: "{{ rclone_service_name }}.service" state: stopped enabled: false- when: rclone_service_name_full in saltbox_managed_rclone_services and rclone_service_status != 'not-found' and rclone_service_status != 'failed'+ when: (rclone_service_name_full in saltbox_managed_rclone_services) and (rclone_service_status != 'not-found') and (rclone_service_status != 'failed') - name: "Cleanup | Delete '{{ rclone_service_name }}.service'" ansible.builtin.file: path: "/etc/systemd/system/{{ rclone_service_name }}.service" state: absent- when: rclone_service_name_full in saltbox_managed_rclone_services+ when: (rclone_service_name_full in saltbox_managed_rclone_services) - name: "Cleanup | Stop and disable existing '{{ rclone_refresh_service_name }}'" ansible.builtin.systemd_service: name: "{{ rclone_refresh_service_name }}" state: stopped enabled: false- when: rclone_refresh_service_name in saltbox_managed_rclone_services and rclone_service_status != 'not-found' and rclone_service_status != 'failed'+ when: (rclone_refresh_service_name in saltbox_managed_rclone_services) and (rclone_service_status != 'not-found') and (rclone_service_status != 'failed') - name: "Cleanup | Stop and disable existing '{{ rclone_service_name }}_refresh.timer'" ansible.builtin.systemd_service: name: "{{ rclone_service_name }}_refresh.timer" state: stopped enabled: false- when: rclone_refresh_service_name in saltbox_managed_rclone_services and rclone_service_status != 'not-found' and rclone_service_status != 'failed'+ when: (rclone_refresh_service_name in saltbox_managed_rclone_services) and (rclone_service_status != 'not-found') and (rclone_service_status != 'failed') - 
name: "Cleanup | Delete '{{ rclone_service_name }}_refresh' services" ansible.builtin.file:@@ -55,7 +55,7 @@ loop: - "{{ rclone_service_name }}_refresh.service" - "{{ rclone_service_name }}_refresh.timer"- when: rclone_refresh_service_name in saltbox_managed_rclone_services+ when: (rclone_refresh_service_name in saltbox_managed_rclone_services) - name: "Cleanup | Set 'cleanup_mount_path'" ansible.builtin.set_fact:@@ -64,7 +64,7 @@ - name: "Cleanup | Fail if 'cleanup_mount_path' is empty" ansible.builtin.fail: msg: "rclone_service_name is empty"- when: cleanup_mount_path | length == 0+ when: (cleanup_mount_path | length == 0) - name: "Cleanup | Check if '{{ cleanup_mount_path }}' exists" ansible.builtin.stat:@@ -92,7 +92,7 @@ when: cleanup_mount_path_stat2.stat.exists - name: "Cleanup | Backup non-empty '{{ cleanup_mount_path }}' path"- ansible.builtin.shell: "mv {{ cleanup_mount_path }} {{ cleanup_mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_date_time['epoch'] | int) }}"+ ansible.builtin.shell: "mv {{ cleanup_mount_path }} {{ cleanup_mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_facts['date_time']['epoch'] | int) }}" ignore_errors: true when: cleanup_mount_path_stat2.stat.exists and (cleanup_mount_path_stat_files.matched | int > 0)
modified
roles/remote/tasks/main.yml
@@ -22,8 +22,8 @@ - name: Get Docker service state ansible.builtin.set_fact:- remote_docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"- remote_docker_controller_service_running: "{{ (services['saltbox_managed_docker_controller.service'] is defined) and (services['saltbox_managed_docker_controller.service']['state'] == 'running') }}"+ remote_docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"+ remote_docker_controller_service_running: "{{ (ansible_facts['services']['saltbox_managed_docker_controller.service'] is defined) and (ansible_facts['services']['saltbox_managed_docker_controller.service']['state'] == 'running') }}" when: remote_docker_binary.stat.exists - name: Tasks for when Docker exists and is running@@ -38,6 +38,13 @@ ansible.builtin.set_fact: containers_list: "{{ remote_docker_running_containers_ps.stdout }}" + - name: Check for saltbox_docker_controller:app (legacy controller) in service file+ ansible.builtin.shell: "grep -q 'saltbox_docker_controller:app' /etc/systemd/system/saltbox_managed_docker_controller.service"+ register: remote_legacy_docker_controller+ failed_when: false+ changed_when: false+ when: (remote_docker_controller_service_running is defined) and remote_docker_controller_service_running+ - name: Block Docker Controller ansible.builtin.uri: url: "{{ docker_controller_url }}/block/20"@@ -50,7 +57,7 @@ ansible.builtin.systemd_service: name: saltbox_managed_docker_controller state: restarted- when: remote_docker_controller_service_running is defined and remote_docker_controller_service_running+ when: (remote_docker_controller_service_running is defined) and remote_docker_controller_service_running and (remote_legacy_docker_controller.rc is not defined or remote_legacy_docker_controller.rc != 0) - name: Wait until Controller is ready ansible.builtin.uri:@@ 
-78,7 +85,7 @@ - name: Stop Saltbox Docker containers ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_saltbox_docker_containers.yml"- when: remote_docker_controller_service_running is defined and remote_docker_controller_service_running+ when: (remote_docker_controller_service_running is defined) and remote_docker_controller_service_running - name: "Stop all running Docker containers" ansible.builtin.shell: "docker stop {{ containers_list }}"@@ -91,7 +98,7 @@ method: POST timeout: 600 ignore_errors: true- when: remote_docker_controller_service_running is defined and remote_docker_controller_service_running+ when: (remote_docker_controller_service_running is defined) and remote_docker_controller_service_running - name: Stop docker service ansible.builtin.systemd_service:@@ -113,14 +120,14 @@ block: - name: "Create services folder." ansible.builtin.file:- path: "/opt/services"+ path: "{{ server_appdata_path }}/services" state: directory owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775" recurse: true - - name: "Synchronize '/etc/systemd/system' to '/opt/services'"+ - name: "Synchronize '/etc/systemd/system' to '{{ server_appdata_path }}/services'" ansible.builtin.shell: | /usr/bin/rsync \ --delay-updates \@@ -133,14 +140,14 @@ --exclude='saltbox_managed_*' \ --include='*.service' \ --include='*.mount' \- /etc/systemd/system/* /opt/services/+ /etc/systemd/system/* {{ server_appdata_path }}/services/ args: executable: /bin/bash ignore_errors: true - name: "Reset permissions of folders" ansible.builtin.file:- path: "/opt/services"+ path: "{{ server_appdata_path }}/services" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -203,7 +210,7 @@ - name: Get list of existing remote services ansible.builtin.set_fact: saltbox_managed_rclone_services: "{{ (saltbox_managed_rclone_services | default({})) | combine({rclone_service.key: rclone_service.value.status}) }}"- loop: "{{ services | dict2items }}"+ loop: "{{ 
ansible_facts['services'] | dict2items }}" loop_control: loop_var: rclone_service when: (rclone_service_template in rclone_service.key)@@ -227,9 +234,9 @@ - name: Remote Tasks ansible.builtin.include_tasks: "remote.yml" with_items: "{{ rclone.remotes }}"- when: rclone_remote_is_defined and item.settings.template != "nfs" and item.settings.mount+ when: rclone_remote_is_defined and (item.settings.template != "nfs") and (item.settings.mount | bool) - name: Remote Tasks (NFS) ansible.builtin.include_tasks: "nfs.yml" with_items: "{{ rclone.remotes }}"- when: rclone_remote_is_defined and item.settings.template == "nfs" and item.settings.mount+ when: rclone_remote_is_defined and (item.settings.template == "nfs") and (item.settings.mount | bool)
modified
roles/remote/tasks/nfs.yml
@@ -10,7 +10,7 @@ - name: "Remote (NFS) | Install NFS requirements" ansible.builtin.apt: name: nfs-common- state: present+ state: latest - name: "Remote (NFS) | Set Variables" ansible.builtin.set_fact:@@ -43,7 +43,7 @@ ignore_errors: true - name: "Remote (NFS) | Backup non-empty '{{ mount_path }}' path"- ansible.builtin.shell: "mv {{ mount_path }} {{ mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_date_time['epoch'] | int) }}"+ ansible.builtin.shell: "mv {{ mount_path }} {{ mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_facts['date_time']['epoch'] | int) }}" ignore_errors: true when: (mount_path_stat_files.matched | int > 0)
modified
roles/remote/tasks/remote.yml
@@ -46,7 +46,7 @@ ignore_errors: true - name: "Remote | Backup non-empty '{{ mount_path }}' path"- ansible.builtin.shell: "mv {{ mount_path }} {{ mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_date_time['epoch'] | int) }}"+ ansible.builtin.shell: "mv {{ mount_path }} {{ mount_path }}_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_facts['date_time']['epoch'] | int) }}" ignore_errors: true when: (mount_path_stat_files.matched | int > 0)
modified
roles/remote/templates/dropbox.j2
@@ -19,10 +19,10 @@ ExecStartPre=/bin/sleep 10 ExecStart=/usr/bin/rclone mount \ --allow-other \-{% if mounts.ipv4_only %}- --bind={{ ansible_default_ipv4.address }} \+{% if rclone_mounts_ipv4_only %}+ --bind={{ ansible_facts['default_ipv4']['address'] }} \ {% endif %}-{% if (rclone_vfs_cache_dir_lookup | length > 0) and item.settings.vfs_cache.enabled %}+{% if (rclone_vfs_cache_dir_lookup | length > 0) and (item.settings.vfs_cache.enabled | bool) %} --cache-dir={{ rclone_vfs_cache_dir_lookup }} \ {% endif %} --config={{ rclone_config_path }} \@@ -33,7 +33,7 @@ --poll-interval=30s \ --rc \ --rc-addr=localhost:{{ rclone_remort_port }} \-{% if rclone_enable_metrics %}+{% if (rclone_enable_metrics | bool) %} --metrics-addr=172.19.0.1:{{ rclone_remort_port }} \ {% endif %} --rc-no-auth \@@ -42,7 +42,7 @@ --umask=002 \ --use-mmap \ --user-agent='{{ user_agent }}' \-{% if item.settings.vfs_cache.enabled %}+{% if (item.settings.vfs_cache.enabled | bool) %} --vfs-cache-min-free-space={{ rclone_vfs_cache_min_free_space }} \ --vfs-cache-max-age={{ item.settings.vfs_cache.max_age | default('504h') }} \ --vfs-cache-max-size={{ item.settings.vfs_cache.size | default('50G') }} \@@ -52,7 +52,7 @@ --vfs-read-ahead=128M \ {% endif %} --vfs-read-chunk-size-limit=2G \- --vfs-read-chunk-size={{ '32M' if item.settings.vfs_cache.enabled else '64M' }} \+ --vfs-read-chunk-size={{ '32M' if (item.settings.vfs_cache.enabled | bool) else '64M' }} \ -v \ "{{ rclone_remote_with_path }}" "/mnt/remote/{{ rclone_remote_name }}" ExecStop=/bin/fusermount3 -uz "/mnt/remote/{{ rclone_remote_name }}"@@ -60,7 +60,7 @@ RestartSec=5 StartLimitInterval=60s StartLimitBurst=3-{% if item.settings.vfs_cache.enabled %}+{% if (item.settings.vfs_cache.enabled | bool) %} TimeoutSec=21600 LimitNOFILE=infinity LimitMEMLOCK=infinity
modified
roles/remote/templates/google.j2
@@ -19,16 +19,16 @@ ExecStartPre=/bin/sleep 10 ExecStart=/usr/bin/rclone mount \ --allow-other \-{% if mounts.ipv4_only %}- --bind={{ ansible_default_ipv4.address }} \+{% if rclone_mounts_ipv4_only %}+ --bind={{ ansible_facts['default_ipv4']['address'] }} \ {% endif %}-{% if (rclone_vfs_cache_dir_lookup | length > 0) and item.settings.vfs_cache.enabled %}+{% if (rclone_vfs_cache_dir_lookup | length > 0) and (item.settings.vfs_cache.enabled | bool) %} --cache-dir={{ rclone_vfs_cache_dir_lookup }} \ {% endif %} --config={{ rclone_config_path }} \- --buffer-size={{ '32M' if item.settings.vfs_cache.enabled else '64M' }} \+ --buffer-size={{ '32M' if (item.settings.vfs_cache.enabled | bool) else '64M' }} \ --dir-cache-time={{ rclone_cloud_dir_cache_time }} \-{% if not item.settings.vfs_cache.enabled %}+{% if not (item.settings.vfs_cache.enabled | bool) %} --drive-chunk-size=64M \ {% endif %} --drive-pacer-burst=1000 \@@ -37,7 +37,7 @@ --poll-interval=15s \ --rc \ --rc-addr=localhost:{{ rclone_remort_port }} \-{% if rclone_enable_metrics %}+{% if (rclone_enable_metrics | bool) %} --metrics-addr=172.19.0.1:{{ rclone_remort_port }} \ {% endif %} --rc-no-auth \@@ -46,7 +46,7 @@ --umask=002 \ --use-mmap \ --user-agent='{{ user_agent }}' \-{% if item.settings.vfs_cache.enabled %}+{% if (item.settings.vfs_cache.enabled | bool) %} --vfs-cache-min-free-space={{ rclone_vfs_cache_min_free_space }} \ --vfs-cache-max-age={{ item.settings.vfs_cache.max_age | default('504h') }} \ --vfs-cache-max-size={{ item.settings.vfs_cache.size | default('50G') }} \@@ -56,7 +56,7 @@ --vfs-read-ahead=128M \ {% endif %} --vfs-read-chunk-size-limit=2G \- --vfs-read-chunk-size={{ '32M' if item.settings.vfs_cache.enabled else '64M' }} \+ --vfs-read-chunk-size={{ '32M' if (item.settings.vfs_cache.enabled | bool) else '64M' }} \ -v \ "{{ rclone_remote_with_path }}" "/mnt/remote/{{ rclone_remote_name }}" ExecStop=/bin/fusermount3 -uz "/mnt/remote/{{ rclone_remote_name }}"@@ -64,7 +64,7 @@ RestartSec=5 
StartLimitInterval=60s StartLimitBurst=3-{% if item.settings.vfs_cache.enabled %}+{% if (item.settings.vfs_cache.enabled | bool) %} TimeoutSec=21600 LimitNOFILE=infinity LimitMEMLOCK=infinity
modified
roles/remote/templates/sftp.j2
@@ -19,7 +19,7 @@ ExecStartPre=/bin/sleep 10 ExecStart=/usr/bin/rclone mount \ --allow-other \-{% if (rclone_vfs_cache_dir_lookup | length > 0) and item.settings.vfs_cache.enabled %}+{% if (rclone_vfs_cache_dir_lookup | length > 0) and (item.settings.vfs_cache.enabled | bool) %} --cache-dir={{ rclone_vfs_cache_dir_lookup }} \ {% endif %} --config={{ rclone_config_path }} \@@ -28,19 +28,19 @@ --max-read-ahead=200M \ --rc \ --rc-addr=localhost:{{ rclone_remort_port }} \-{% if rclone_enable_metrics %}+{% if (rclone_enable_metrics | bool) %} --metrics-addr=172.19.0.1:{{ rclone_remort_port }} \ {% endif %} --rc-no-auth \ --sftp-chunk-size={{ rclone_sftp_chunk_size }} \ --sftp-concurrency={{ rclone_sftp_concurrency }} \-{% if rclone_sftp_disable_hashcheck %}+{% if (rclone_sftp_disable_hashcheck | bool) %} --sftp-disable-hashcheck \ {% endif %} --syslog \ --umask=002 \ --user-agent='{{ user_agent }}' \-{% if item.settings.vfs_cache.enabled %}+{% if (item.settings.vfs_cache.enabled | bool) %} --vfs-cache-min-free-space={{ rclone_vfs_cache_min_free_space }} \ --vfs-cache-max-age={{ item.settings.vfs_cache.max_age | default('504h') }} \ --vfs-cache-max-size={{ item.settings.vfs_cache.size | default('50G') }} \
modified
roles/restore/tasks/main.yml
@@ -12,14 +12,14 @@ msg: - "Rclone backup is not enabled." - "You must enable rclone, in the backup settings, to perform a with a specific tar file restore."- when: (not backup.rclone.enable) and (restore_tar is defined)+ when: (not backup_rclone_enabled) and (restore_tar is defined) - name: Fail when local backup method is enabled and using restore_tar ansible.builtin.fail: msg: - "The restore_tar setting does not work with a local backup enabled." - "Remember to empty the local backup folder if you disable the setting."- when: backup.local.enable and (restore_tar is defined)+ when: backup_rclone_enabled and (restore_tar is defined) - name: "Check if user '{{ user.name }}' exists" ansible.builtin.shell: "id -un {{ user.name }} >/dev/null 2>&1;"@@ -37,7 +37,7 @@ when: (user_check.rc == 1) and (not restore_tar is defined) - name: Variables- ansible.builtin.import_tasks: "variables.yml"+ ansible.builtin.include_tasks: "variables.yml" - name: "Check if 'localhost.yml' exists in '{{ playbook_dir }}'" ansible.builtin.stat:@@ -85,8 +85,8 @@ - name: BTRFS Tasks ansible.builtin.include_tasks: "btrfs.yml" loop:- - /opt- - /mnt/local+ - "{{ server_appdata_path }}"+ - "{{ server_local_folder_path }}" loop_control: loop_var: outer_item @@ -98,8 +98,8 @@ group: "{{ user.name }}" mode: "0775" with_items:- - /opt- - /mnt/local+ - "{{ server_appdata_path }}"+ - "{{ server_local_folder_path }}" - name: "Create backup location '{{ backup.local.destination }}'" ansible.builtin.file:@@ -129,11 +129,11 @@ # Restore from remote backup when local backup does not exist - name: Restore Remote- ansible.builtin.import_tasks: "restore_remote.yml"+ ansible.builtin.include_tasks: "restore_remote.yml" when: (dir_files.matched | int == 0) and (not restore_tar is defined) - name: Restore specific tar- ansible.builtin.import_tasks: "restore_tar.yml"+ ansible.builtin.include_tasks: "restore_tar.yml" when: (dir_files.matched | int == 0) and (restore_tar is defined) - name: "Look for 
'backup_excludes_list.txt' file in '{{ backup.local.destination }}'"@@ -143,7 +143,7 @@ - name: "Delete 'z' opt folder when doing full restore" ansible.builtin.file:- path: "/opt/z"+ path: "{{ server_appdata_path }}/z" state: absent when: (not restore_tar is defined) @@ -157,23 +157,67 @@ force: true when: backup_excludes_list.stat.exists and (not restore_tar is defined) -- name: "Backup existing folders in '/opt' to prevent overwriting them"- ansible.builtin.shell: mv '/opt/{{ (item | basename | splitext)[0] }}' '/opt/{{ (item | basename | splitext)[0] }}_bak' 2>/dev/null || true+- name: "Check for existing backup folders (full restore)"+ ansible.builtin.stat:+ path: "{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}_bak" with_fileglob: - "{{ backup.local.destination }}/opt/*.tar"- loop_control:- label: "'/opt/{{ (item | basename | splitext)[0] }}' --> '/opt/{{ (item | basename | splitext)[0] }}_bak'"--- name: "Unarchive backup tarballs into '/opt'"- ansible.builtin.shell: tar -xf '{{ item }}' -C '/opt/'+ register: existing_bak_folders_full+ when: (not restore_tar is defined)++- name: "Fail if backup folders already exist (full restore)"+ ansible.builtin.fail:+ msg:+ - "Backup folder '{{ server_appdata_path }}/{{ (item.item | basename | splitext)[0] }}_bak' already exists."+ - "Please remove or rename existing backup folders before running restore."+ - "This prevents accidental loss of existing backup data."+ with_items: "{{ existing_bak_folders_full.results }}"+ when: (not restore_tar is defined) and item.stat.exists+ loop_control:+ label: "'{{ server_appdata_path }}/{{ (item.item | basename | splitext)[0] }}_bak'"++- name: "Backup existing folders in '{{ server_appdata_path }}' to prevent overwriting them (full restore)"+ ansible.builtin.shell: mv '{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}' '{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}_bak' 2>/dev/null || true with_fileglob: - "{{ backup.local.destination 
}}/opt/*.tar" loop_control:- label: "'{{ item | basename }}' --> '/opt/{{ (item | basename | splitext)[0] }}'"+ label: "'{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}' --> '{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}_bak'"+ when: (not restore_tar is defined)++- name: "Check for existing backup folder (specific tar restore)"+ ansible.builtin.stat:+ path: "{{ server_appdata_path }}/{{ (restore_tar | basename | splitext)[0] }}_bak"+ register: existing_bak_folder_specific+ when: (restore_tar is defined)++- name: "Fail if backup folder already exists (specific tar restore)"+ ansible.builtin.fail:+ msg:+ - "Backup folder '{{ server_appdata_path }}/{{ (restore_tar | basename | splitext)[0] }}_bak' already exists."+ - "Please remove or rename the existing backup folder before running restore."+ - "This prevents accidental loss of existing backup data."+ when: (restore_tar is defined) and existing_bak_folder_specific.stat.exists++- name: "Backup existing folder in '{{ server_appdata_path }}' to prevent overwriting it (specific tar restore)"+ ansible.builtin.shell: mv '{{ server_appdata_path }}/{{ (restore_tar | basename | splitext)[0] }}' '{{ server_appdata_path }}/{{ (restore_tar | basename | splitext)[0] }}_bak' 2>/dev/null || true+ when: (restore_tar is defined)++- name: "Unarchive backup tarballs into '{{ server_appdata_path }}' (full restore)"+ ansible.builtin.shell: tar -xf '{{ item }}' -C '{{ server_appdata_path }}/'+ with_fileglob:+ - "{{ backup.local.destination }}/opt/*.tar"+ loop_control:+ label: "'{{ item | basename }}' --> '{{ server_appdata_path }}/{{ (item | basename | splitext)[0] }}'" register: unarchive--- name: "Set '/opt' ownership and permissions"- ansible.builtin.import_tasks: "permissions.yml"+ when: (not restore_tar is defined)++- name: "Unarchive specific backup tarball into '{{ server_appdata_path }}' (specific tar restore)"+ ansible.builtin.shell: tar -xf '{{ backup.local.destination }}/opt/{{ restore_tar }}' 
-C '{{ server_appdata_path }}/'+ register: unarchive+ when: (restore_tar is defined)++- name: "Set '{{ server_appdata_path }}' ownership and permissions"+ ansible.builtin.include_tasks: "permissions.yml" tags: opt-permissions-reset - name: Cleanup backup location@@ -182,13 +226,9 @@ state: absent become: true become_user: "{{ user.name }}"- when: (not backup.local.enable)+ when: (not backup_local_enabled) - name: Finished restoring the backup ansible.builtin.debug: msg: Finished restoring the backup. You are now ready to install Saltbox! when: (unarchive is succeeded)--- name: Settings Role- ansible.builtin.include_role:- name: settings
modified
roles/restore/tasks/permissions.yml
@@ -7,8 +7,8 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Set '/opt' ownership recursively- ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} /opt"+- name: Set '{{ server_appdata_path }}' ownership recursively+ ansible.builtin.shell: "chown -R {{ user.name }}:{{ user.name }} {{ server_appdata_path }}" -- name: Set '/opt' permissions recursively- ansible.builtin.shell: "chmod -R ugo+X /opt"+- name: Set '{{ server_appdata_path }}' permissions recursively+ ansible.builtin.shell: "chmod -R ugo+X {{ server_appdata_path }}"
modified
roles/restore/tasks/restore_remote.yml
@@ -7,16 +7,12 @@ # GNU General Public License v3.0 # ######################################################################### ----# Checks- - name: Fail when no backup method is enabled or when no local backup exists ansible.builtin.fail: msg: - "Rclone is not enabled and no local backup exists." - "You must either enable rclone, in the backup settings, or provide backup tarball files locally, to perform a restore."- when: (not backup.rclone.enable)--# Folder+ when: (not backup_rclone_enabled) - name: Cleanup backup location ansible.builtin.file:@@ -24,8 +20,6 @@ state: absent become: true become_user: "{{ user.name }}"--# Rclone - name: "Check if 'rclone.conf' exists in '{{ playbook_dir }}' folder" ansible.builtin.stat:@@ -88,8 +82,6 @@ become: true become_user: "{{ user.name }}" -# Checks- - name: "Check if tar files were retrieved" ansible.builtin.find: paths: "{{ backup.local.destination }}/opt"
modified
roles/restore/tasks/restore_tar.yml
@@ -7,16 +7,12 @@ # GNU General Public License v3.0 # ######################################################################### ----# Folder- - name: Cleanup backup location ansible.builtin.file: path: "{{ backup.local.destination }}" state: absent become: true become_user: "{{ user.name }}"--# Rclone - name: "Check if 'rclone.conf' exists in '{{ playbook_dir }}' folder" ansible.builtin.stat:@@ -79,8 +75,6 @@ become: true become_user: "{{ user.name }}" -# Checks- - name: "Check if tar files were retrieved" ansible.builtin.find: paths: "{{ backup.local.destination }}/opt"
modified
roles/rutorrent/defaults/main.yml
@@ -17,70 +17,70 @@ # Paths ################################ -rutorrent_paths_folder: "{{ rutorrent_name }}"-rutorrent_paths_location: "{{ server_appdata_path }}/{{ rutorrent_paths_folder }}"-rutorrent_paths_downloads_location: "{{ downloads_torrents_path }}/{{ rutorrent_paths_folder }}"-rutorrent_paths_folders_list:- - "{{ rutorrent_paths_location }}"- - "{{ rutorrent_paths_location }}/plugins"- - "{{ rutorrent_paths_location }}/themes"- - "{{ rutorrent_paths_downloads_location }}"- - "{{ rutorrent_paths_downloads_location }}/completed"- - "{{ rutorrent_paths_downloads_location }}/incoming"- - "{{ rutorrent_paths_downloads_location }}/watched"--# Config files-rutorrent_paths_config_php_location: "{{ rutorrent_paths_location }}/rutorrent/settings/config.php"-rutorrent_paths_rtorrent_rc_location: "{{ rutorrent_paths_location }}/rtorrent/rtorrent.rc"-rutorrent_paths_php_local_ini_location: "{{ rutorrent_paths_location }}/php/php-local.ini"-rutorrent_paths_plugins_ini_location: "{{ rutorrent_paths_location }}/rutorrent/settings/plugins.ini"+rutorrent_role_paths_folder: "{{ rutorrent_name }}"+rutorrent_role_paths_location: "{{ server_appdata_path }}/{{ rutorrent_role_paths_folder }}"+rutorrent_role_paths_downloads_location: "{{ downloads_torrents_path }}/{{ rutorrent_role_paths_folder }}"+rutorrent_role_paths_config_php_location: "{{ rutorrent_role_paths_location }}/rutorrent/settings/config.php"+rutorrent_role_paths_rtorrent_rc_location: "{{ rutorrent_role_paths_location }}/rtorrent/rtorrent.rc"+rutorrent_role_paths_php_local_ini_location: "{{ rutorrent_role_paths_location }}/php/php-local.ini"+rutorrent_role_paths_plugins_ini_location: "{{ rutorrent_role_paths_location }}/rutorrent/settings/plugins.ini"+rutorrent_role_paths_folders_list:+ - "{{ rutorrent_role_paths_location }}"+ - "{{ rutorrent_role_paths_location }}/plugins"+ - "{{ rutorrent_role_paths_location }}/themes"+ - "{{ rutorrent_role_paths_downloads_location }}"+ - "{{ 
rutorrent_role_paths_downloads_location }}/completed"+ - "{{ rutorrent_role_paths_downloads_location }}/incoming"+ - "{{ rutorrent_role_paths_downloads_location }}/watched" ################################ # Web ################################ -rutorrent_web_subdomain: "{{ rutorrent_name }}"-rutorrent_web_domain: "{{ user.domain }}"-rutorrent_web_port: "80"-rutorrent_web_url: "{{ 'https://' + (rutorrent_web_subdomain + '.' + rutorrent_web_domain- if (rutorrent_web_subdomain | length > 0)- else rutorrent_web_domain) }}"+rutorrent_role_web_subdomain: "{{ rutorrent_name }}"+rutorrent_role_web_domain: "{{ user.domain }}"+rutorrent_role_web_port: "80"+rutorrent_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='rutorrent') + '.' + lookup('role_var', '_web_domain', role='rutorrent')+ if (lookup('role_var', '_web_subdomain', role='rutorrent') | length > 0)+ else lookup('role_var', '_web_domain', role='rutorrent')) }}" ################################ # DNS ################################ -rutorrent_dns_record: "{{ rutorrent_web_subdomain }}"-rutorrent_dns_zone: "{{ rutorrent_web_domain }}"-rutorrent_dns_proxy: "{{ dns.proxied }}"+rutorrent_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='rutorrent') }}"+rutorrent_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='rutorrent') }}"+rutorrent_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -rutorrent_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-rutorrent_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', rutorrent_name + '_name', default=rutorrent_name)- if (rutorrent_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-rutorrent_traefik_middleware_custom: ""-rutorrent_traefik_certresolver: "{{ traefik_default_certresolver }}"-rutorrent_traefik_enabled: true-rutorrent_traefik_api_enabled: true-rutorrent_traefik_api_middleware: 
"rutorrent-auth,{{ traefik_default_middleware_api }}"-rutorrent_traefik_api_endpoint: "PathPrefix(`/RPC2`)"+rutorrent_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+rutorrent_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + rutorrent_name+ if (lookup('role_var', '_themepark_enabled', role='rutorrent') and global_themepark_plugin_enabled)+ else '') }}"+rutorrent_role_traefik_middleware_custom: ""+rutorrent_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+rutorrent_role_traefik_enabled: true+rutorrent_role_traefik_api_enabled: true+rutorrent_role_traefik_api_middleware: "rutorrent-auth,{{ traefik_default_middleware_api }}"+rutorrent_role_traefik_api_endpoint: "PathPrefix(`/RPC2`)" ################################ # Config ################################ -rutorrent_config_public_trackers: false-rutorrent_config_diskspace_path: "/mnt"+# Toggles if public tracker functionality is enabled+rutorrent_role_config_public_trackers: false+# Path used by the diskspace plugin to check usage+rutorrent_role_config_diskspace_path: "/mnt" ## New Installs # rtorrent.rc-rutorrent_config_new_installs_rutorrent_rc_settings_default:+rutorrent_role_config_new_installs_rutorrent_rc_settings_default: # Minimum number of peers to connect to per torrent - { option: "throttle.min_peers.normal.set", value: "1" } # Maximum number of simultaneous upload slots per torrent@@ -107,152 +107,120 @@ - { option: "directory.default.set", value: "/mnt/unionfs/downloads/torrents/rutorrent/completed" } # Watched Directory - { option: "schedule", value: 'watch_directory,5,5,"load.start=/mnt/unionfs/downloads/torrents/rutorrent/watched/*.torrent,d.delete_tied="' }-rutorrent_config_new_installs_rutorrent_rc_settings_custom: []-rutorrent_config_new_installs_rutorrent_rc_settings_list: "{{ rutorrent_config_new_installs_rutorrent_rc_settings_default- + rutorrent_config_new_installs_rutorrent_rc_settings_custom 
}}"+rutorrent_role_config_new_installs_rutorrent_rc_settings_custom: []+rutorrent_role_config_new_installs_rutorrent_rc_settings_list: "{{ lookup('role_var', '_config_new_installs_rutorrent_rc_settings_default', role='rutorrent')+ + lookup('role_var', '_config_new_installs_rutorrent_rc_settings_custom', role='rutorrent') }}" # php-local.ini-rutorrent_config_new_installs_php_local_ini_settings_default:+rutorrent_role_config_new_installs_php_local_ini_settings_default: # Maximum Upload File Size via Web Browser (eg Uploading Torrent Files) - { option: "upload_max_filesize", value: "20M" }-rutorrent_config_new_installs_php_local_ini_settings_custom: []-rutorrent_config_new_installs_php_local_ini_settings_list: "{{ rutorrent_config_new_installs_php_local_ini_settings_default- + rutorrent_config_new_installs_php_local_ini_settings_custom }}"+rutorrent_role_config_new_installs_php_local_ini_settings_custom: []+rutorrent_role_config_new_installs_php_local_ini_settings_list: "{{ lookup('role_var', '_config_new_installs_php_local_ini_settings_default', role='rutorrent')+ + lookup('role_var', '_config_new_installs_php_local_ini_settings_custom', role='rutorrent') }}" ## Existing Installs # rtorrent.rc-rutorrent_config_existing_installs_rutorrent_rc_settings_default:+rutorrent_role_config_existing_installs_rutorrent_rc_settings_default: # Execute - Initiate Plugins - { option: "execute", value: "{sh,-c,/usr/bin/php /app/rutorrent/php/initplugins.php abc &}" } # IP address that is reported to the tracker - { option: "network.local_address.set", value: "{{ ip_address_public }}" } # Ports- - { option: "network.port_range.set", value: "{{ rutorrent_docker_ports_51413 }}-{{ rutorrent_docker_ports_51413 }}" }- - { option: "dht.port.set", value: "{{ rutorrent_docker_ports_6881 }}" }+ - { option: "network.port_range.set", value: "{{ rutorrent_role_docker_ports_51413 }}-{{ rutorrent_role_docker_ports_51413 }}" }+ - { option: "dht.port.set", value: "{{ rutorrent_role_docker_ports_6881 
}}" } # Enable / Disable Public Trackers- - { option: "dht.mode.set", value: "{{ rutorrent_config_public_trackers | ternary('on', 'disable') }}" }- - { option: "trackers.use_udp.set", value: "{{ rutorrent_config_public_trackers | ternary('yes', 'no') }}" }- - { option: "protocol.pex.set", value: "{{ rutorrent_config_public_trackers | ternary('yes', 'no') }}" }-rutorrent_config_existing_installs_rutorrent_rc_settings_custom: []-rutorrent_config_existing_installs_rutorrent_rc_settings_list: "{{ rutorrent_config_existing_installs_rutorrent_rc_settings_default- + rutorrent_config_existing_installs_rutorrent_rc_settings_custom }}"--################################-# THEME+ - { option: "dht.mode.set", value: "{{ lookup('role_var', '_config_public_trackers', role='rutorrent') | ternary('on', 'disable') }}" }+ - { option: "trackers.use_udp.set", value: "{{ lookup('role_var', '_config_public_trackers', role='rutorrent') | ternary('yes', 'no') }}" }+ - { option: "protocol.pex.set", value: "{{ lookup('role_var', '_config_public_trackers', role='rutorrent') | ternary('yes', 'no') }}" }+rutorrent_role_config_existing_installs_rutorrent_rc_settings_custom: []+rutorrent_role_config_existing_installs_rutorrent_rc_settings_list: "{{ lookup('role_var', '_config_existing_installs_rutorrent_rc_settings_default', role='rutorrent')+ + lookup('role_var', '_config_existing_installs_rutorrent_rc_settings_custom', role='rutorrent') }}"++################################+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-rutorrent_themepark_enabled: false-rutorrent_themepark_app: "rutorrent"-rutorrent_themepark_theme: "{{ global_themepark_theme }}"-rutorrent_themepark_domain: "{{ global_themepark_domain }}"-rutorrent_themepark_addons: []+rutorrent_role_themepark_enabled: false+rutorrent_role_themepark_app: "rutorrent"+rutorrent_role_themepark_theme: "{{ global_themepark_theme }}"+rutorrent_role_themepark_domain: "{{ 
global_themepark_domain }}"+rutorrent_role_themepark_addons: [] ################################ # Docker ################################ # Container-rutorrent_docker_container: "{{ rutorrent_name }}"+rutorrent_role_docker_container: "{{ rutorrent_name }}" # Image-rutorrent_docker_image_pull: true-rutorrent_docker_image_tag: "latest"-rutorrent_docker_image: "kudeta/ru-rtorrent:{{ rutorrent_docker_image_tag }}"+rutorrent_role_docker_image_pull: true+rutorrent_role_docker_image_tag: "latest"+rutorrent_role_docker_image_repo: "kudeta/ru-rtorrent"+rutorrent_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='rutorrent') }}:{{ lookup('role_var', '_docker_image_tag', role='rutorrent') }}" # Ports-rutorrent_docker_ports_51413: "{{ port_lookup_51413.meta.port- if (port_lookup_51413.meta.port is defined) and (port_lookup_51413.meta.port | trim | length > 0)- else '51413' }}"-rutorrent_docker_ports_6881: "{{ port_lookup_6881.meta.port- if (port_lookup_6881.meta.port is defined) and (port_lookup_6881.meta.port | trim | length > 0)- else '6881' }}"--rutorrent_docker_ports_defaults:- - "{{ rutorrent_docker_ports_51413 }}:{{ rutorrent_docker_ports_51413 }}"- - "{{ rutorrent_docker_ports_51413 }}:{{ rutorrent_docker_ports_51413 }}/udp"- - "{{ rutorrent_docker_ports_6881 }}:{{ rutorrent_docker_ports_6881 }}/udp"-rutorrent_docker_ports_custom: []--rutorrent_docker_ports: "{{ rutorrent_docker_ports_defaults- + rutorrent_docker_ports_custom }}"+rutorrent_role_docker_ports_51413: "{{ port_lookup_51413.meta.port+ if (port_lookup_51413.meta.port is defined) and (port_lookup_51413.meta.port | trim | length > 0)+ else '51413' }}"+rutorrent_role_docker_ports_6881: "{{ port_lookup_6881.meta.port+ if (port_lookup_6881.meta.port is defined) and (port_lookup_6881.meta.port | trim | length > 0)+ else '6881' }}"++rutorrent_role_docker_ports_default:+ - "{{ lookup('role_var', '_docker_ports_51413', role='rutorrent') }}:{{ lookup('role_var', '_docker_ports_51413', 
role='rutorrent') }}"+ - "{{ lookup('role_var', '_docker_ports_51413', role='rutorrent') }}:{{ lookup('role_var', '_docker_ports_51413', role='rutorrent') }}/udp"+ - "{{ lookup('role_var', '_docker_ports_6881', role='rutorrent') }}:{{ lookup('role_var', '_docker_ports_6881', role='rutorrent') }}/udp"+rutorrent_role_docker_ports_custom: []+rutorrent_role_docker_ports: "{{ lookup('role_var', '_docker_ports_default', role='rutorrent')+ + lookup('role_var', '_docker_ports_custom', role='rutorrent') }}" # Envs-rutorrent_docker_envs_default:+rutorrent_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}"-rutorrent_docker_envs_custom: {}-rutorrent_docker_envs: "{{ rutorrent_docker_envs_default- | combine(rutorrent_docker_envs_custom) }}"--# Commands-rutorrent_docker_commands_default: []-rutorrent_docker_commands_custom: []-rutorrent_docker_commands: "{{ rutorrent_docker_commands_default- + rutorrent_docker_commands_custom }}"+rutorrent_role_docker_envs_custom: {}+rutorrent_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='rutorrent')+ | combine(lookup('role_var', '_docker_envs_custom', role='rutorrent')) }}" # Volumes-rutorrent_docker_volumes_default:- - "{{ rutorrent_paths_location }}:/config"+rutorrent_role_docker_volumes_default:+ - "{{ rutorrent_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-rutorrent_docker_volumes_custom: []-rutorrent_docker_volumes: "{{ rutorrent_docker_volumes_default- + rutorrent_docker_volumes_custom }}"--# Devices-rutorrent_docker_devices_default: []-rutorrent_docker_devices_custom: []-rutorrent_docker_devices: "{{ rutorrent_docker_devices_default- + rutorrent_docker_devices_custom }}"--# Hosts-rutorrent_docker_hosts_default: {}-rutorrent_docker_hosts_custom: {}-rutorrent_docker_hosts: "{{ docker_hosts_common- | combine(rutorrent_docker_hosts_default)- | combine(rutorrent_docker_hosts_custom) }}"+rutorrent_role_docker_volumes_custom: []+rutorrent_role_docker_volumes: 
"{{ lookup('role_var', '_docker_volumes_default', role='rutorrent')+ + lookup('role_var', '_docker_volumes_custom', role='rutorrent') }}" # Labels-rutorrent_docker_labels_default:+rutorrent_role_docker_labels_default: traefik.http.middlewares.rutorrent-auth.basicauth.usersfile: "/etc/traefik/auth"-rutorrent_docker_labels_custom: {}-rutorrent_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', rutorrent_name + '_docker_labels_default', default=rutorrent_docker_labels_default))- | combine((traefik_themepark_labels- if (rutorrent_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', rutorrent_name + '_docker_labels_custom', default=rutorrent_docker_labels_custom)) }}"+rutorrent_role_docker_labels_custom: {}+rutorrent_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='rutorrent')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='rutorrent') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='rutorrent')) }}" # Hostname-rutorrent_docker_hostname: "{{ rutorrent_name }}"+rutorrent_role_docker_hostname: "{{ rutorrent_name }}" # Networks-rutorrent_docker_networks_alias: "{{ rutorrent_name }}"-rutorrent_docker_networks_default: []-rutorrent_docker_networks_custom: []-rutorrent_docker_networks: "{{ docker_networks_common- + rutorrent_docker_networks_default- + rutorrent_docker_networks_custom }}"--# Capabilities-rutorrent_docker_capabilities_default: []-rutorrent_docker_capabilities_custom: []-rutorrent_docker_capabilities: "{{ rutorrent_docker_capabilities_default- + rutorrent_docker_capabilities_custom }}"--# Security Opts-rutorrent_docker_security_opts_default: []-rutorrent_docker_security_opts_custom: []-rutorrent_docker_security_opts: "{{ rutorrent_docker_security_opts_default- + rutorrent_docker_security_opts_custom }}"+rutorrent_role_docker_networks_alias: "{{ rutorrent_name }}"+rutorrent_role_docker_networks_default: 
[]+rutorrent_role_docker_networks_custom: []+rutorrent_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='rutorrent')+ + lookup('role_var', '_docker_networks_custom', role='rutorrent') }}" # Restart Policy-rutorrent_docker_restart_policy: unless-stopped+rutorrent_role_docker_restart_policy: unless-stopped # Stop Timeout-rutorrent_docker_stop_timeout: 900+rutorrent_role_docker_stop_timeout: 900 # State-rutorrent_docker_state: started+rutorrent_role_docker_state: started
modified
roles/rutorrent/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -22,15 +22,15 @@ - name: Check if existing config file exists ansible.builtin.stat:- path: "{{ rutorrent_paths_rtorrent_rc_location }}"- register: rutorrent_paths_rtorrent_rc_location_stat+ path: "{{ lookup('role_var', '_paths_rtorrent_rc_location', role='rutorrent') }}"+ register: rutorrent_role_paths_rtorrent_rc_location_stat - name: Pre-Install Tasks- ansible.builtin.import_tasks: "subtasks/pre-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/pre-install/main.yml" when: (not continuous_integration) - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml"
modified
roles/rutorrent/tasks/subtasks/post-install/main.yml
@@ -12,9 +12,9 @@ path: "{{ item }}" state: present loop:- - "{{ rutorrent_paths_rtorrent_rc_location }}"- - "{{ rutorrent_paths_php_local_ini_location }}"- - "{{ rutorrent_paths_plugins_ini_location }}"+ - "{{ lookup('role_var', '_paths_rtorrent_rc_location', role='rutorrent') }}"+ - "{{ lookup('role_var', '_paths_php_local_ini_location', role='rutorrent') }}"+ - "{{ lookup('role_var', '_paths_plugins_ini_location', role='rutorrent') }}" - name: Post-Install | Wait for 60 seconds ansible.builtin.wait_for:@@ -24,7 +24,7 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/stop_docker_container.yml" - name: Post-Install | Settings Task- ansible.builtin.import_tasks: "settings/main.yml"+ ansible.builtin.include_tasks: "settings/main.yml" - name: Post-Install | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/rutorrent/tasks/subtasks/post-install/settings/main.yml
@@ -8,12 +8,12 @@ ######################################################################### --- - name: Post-Install | Settings | Update settings For New Installs- ansible.builtin.import_tasks: "new_installs.yml"- when: (not continuous_integration) and (not rutorrent_paths_rtorrent_rc_location_stat.stat.exists)+ ansible.builtin.include_tasks: "new_installs.yml"+ when: (not continuous_integration) and (not rutorrent_role_paths_rtorrent_rc_location_stat.stat.exists) - name: Post-Install | Settings | Create directory ansible.builtin.file:- path: "{{ rutorrent_paths_location }}/plugins/diskspace"+ path: "{{ lookup('role_var', '_paths_location', role='rutorrent') }}/plugins/diskspace" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -22,7 +22,7 @@ - name: Post-Install | Settings | Import custom 'conf.php' for diskspace ansible.builtin.template: src: "diskspace_conf.php.j2"- dest: "{{ rutorrent_paths_location }}/plugins/diskspace/conf.php"+ dest: "{{ lookup('role_var', '_paths_location', role='rutorrent') }}/plugins/diskspace/conf.php" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0644"
modified
roles/rutorrent/tasks/subtasks/post-install/settings/new_installs.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Settings | New Installs | Update 'rtorrent.rc' config settings community.general.ini_file:- path: "{{ rutorrent_paths_rtorrent_rc_location }}"+ path: "{{ lookup('role_var', '_paths_rtorrent_rc_location', role='rutorrent') }}" section: null option: "{{ item.option }}" value: "{{ item.value }}"@@ -18,11 +18,11 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ rutorrent_config_new_installs_rutorrent_rc_settings_list }}"+ loop: "{{ rutorrent_role_config_new_installs_rutorrent_rc_settings_list }}" - name: Post-Install | Settings | New Installs | Update 'php-local.ini' config settings community.general.ini_file:- path: "{{ rutorrent_paths_php_local_ini_location }}"+ path: "{{ lookup('role_var', '_paths_php_local_ini_location', role='rutorrent') }}" section: null option: "{{ item.option }}" value: "{{ item.value }}"@@ -31,13 +31,13 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ rutorrent_config_new_installs_php_local_ini_settings_list }}"+ loop: "{{ lookup('role_var', '_config_new_installs_php_local_ini_settings_list', role='rutorrent') }}" - name: Post-Install | Settings | New Installs | Import custom 'plugins.ini' ansible.builtin.copy: src: plugins.ini force: true- dest: "{{ rutorrent_paths_plugins_ini_location }}"+ dest: "{{ lookup('role_var', '_paths_plugins_ini_location', role='rutorrent') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/rutorrent/tasks/subtasks/pre-install/existing_installs.yml
@@ -9,7 +9,7 @@ --- - name: Pre-Install | Settings | Existing Installs | Update 'rtorrent.rc' config settings community.general.ini_file:- path: "{{ rutorrent_paths_rtorrent_rc_location }}"+ path: "{{ lookup('role_var', '_paths_rtorrent_rc_location', role='rutorrent') }}" section: null option: "{{ item.option }}" value: "{{ item.value }}"@@ -18,4 +18,4 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ rutorrent_config_existing_installs_rutorrent_rc_settings_list }}"+ loop: "{{ lookup('role_var', '_config_existing_installs_rutorrent_rc_settings_list', role='rutorrent') }}"
modified
roles/rutorrent/tasks/subtasks/pre-install/main.yml
@@ -9,7 +9,7 @@ --- - name: Pre-Install | Remove existing 'config.php' ansible.builtin.file:- path: "{{ rutorrent_paths_config_php_location }}"+ path: "{{ lookup('role_var', '_paths_config_php_location', role='rutorrent') }}" state: absent - name: Pre-Install | Get next available port within the range of '51413-51423' # noqa fqcn[action]@@ -29,5 +29,5 @@ ignore_errors: true - name: Pre-Install | Settings | Update settings For Existing Installs- ansible.builtin.import_tasks: "existing_installs.yml"- when: rutorrent_paths_rtorrent_rc_location_stat.stat.exists+ ansible.builtin.include_tasks: "existing_installs.yml"+ when: rutorrent_role_paths_rtorrent_rc_location_stat.stat.exists
modified
roles/rutorrent/templates/diskspace_conf.php.j2
@@ -2,4 +2,4 @@ $diskUpdateInterval = 10; // in seconds $notifySpaceLimit = 512; // in Mb-$partitionDirectory = '{{ rutorrent_config_diskspace_path }}'; // set this to the absolute path for checked partition. +$partitionDirectory = '{{ lookup('role_var', '_config_diskspace_path', role='rutorrent') }}'; // set this to the absolute path for checked partition.
modified
roles/sabnzbd/defaults/main.yml
@@ -17,178 +17,141 @@ # Paths ################################ -sabnzbd_paths_folder: "{{ sabnzbd_name }}"-sabnzbd_paths_location: "{{ server_appdata_path }}/{{ sabnzbd_paths_folder }}"-sabnzbd_paths_downloads_location: "{{ downloads_usenet_path }}/{{ sabnzbd_paths_folder }}"-sabnzbd_paths_folders_list:- - "{{ sabnzbd_paths_location }}"- - "{{ sabnzbd_paths_downloads_location }}"- - "{{ sabnzbd_paths_downloads_location }}/complete"- - "{{ sabnzbd_paths_downloads_location }}/incomplete"- - "{{ sabnzbd_paths_downloads_location }}/watch"-sabnzbd_paths_config_location: "{{ sabnzbd_paths_location }}/sabnzbd.ini"+sabnzbd_role_paths_folder: "{{ sabnzbd_name }}"+sabnzbd_role_paths_location: "{{ server_appdata_path }}/{{ sabnzbd_role_paths_folder }}"+sabnzbd_role_paths_downloads_location: "{{ downloads_usenet_path }}/{{ sabnzbd_role_paths_folder }}"+sabnzbd_role_paths_folders_list:+ - "{{ sabnzbd_role_paths_location }}"+ - "{{ sabnzbd_role_paths_downloads_location }}"+ - "{{ sabnzbd_role_paths_downloads_location }}/complete"+ - "{{ sabnzbd_role_paths_downloads_location }}/incomplete"+ - "{{ sabnzbd_role_paths_downloads_location }}/watch"+sabnzbd_role_paths_config_location: "{{ sabnzbd_role_paths_location }}/sabnzbd.ini" ################################ # Web ################################ -sabnzbd_web_subdomain: "{{ sabnzbd_name }}"-sabnzbd_web_domain: "{{ user.domain }}"-sabnzbd_web_port: "8080"-sabnzbd_web_url: "{{ 'https://' + (sabnzbd_web_subdomain + '.' + sabnzbd_web_domain- if (sabnzbd_web_subdomain | length > 0)- else sabnzbd_web_domain) }}"-sabnzbd_web_local_url: "{{ 'http://' + sabnzbd_name + ':' + sabnzbd_web_port }}"+sabnzbd_role_web_subdomain: "{{ sabnzbd_name }}"+sabnzbd_role_web_domain: "{{ user.domain }}"+sabnzbd_role_web_port: "8080"+sabnzbd_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='sabnzbd') + '.' 
+ lookup('role_var', '_web_domain', role='sabnzbd')+ if (lookup('role_var', '_web_subdomain', role='sabnzbd') | length > 0)+ else lookup('role_var', '_web_domain', role='sabnzbd')) }}"+sabnzbd_role_web_local_url: "{{ 'http://' + sabnzbd_name + ':' + lookup('role_var', '_web_port', role='sabnzbd') }}" ################################ # DNS ################################ -sabnzbd_dns_record: "{{ sabnzbd_web_subdomain }}"-sabnzbd_dns_zone: "{{ sabnzbd_web_domain }}"-sabnzbd_dns_proxy: "{{ dns.proxied }}"+sabnzbd_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='sabnzbd') }}"+sabnzbd_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='sabnzbd') }}"+sabnzbd_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -sabnzbd_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-sabnzbd_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', sabnzbd_name + '_name', default=sabnzbd_name)- if (sabnzbd_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-sabnzbd_traefik_middleware_custom: ""-sabnzbd_traefik_certresolver: "{{ traefik_default_certresolver }}"-sabnzbd_traefik_enabled: true-sabnzbd_traefik_api_enabled: true-sabnzbd_traefik_api_endpoint: "PathPrefix(`/api`)"+sabnzbd_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+sabnzbd_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + sabnzbd_name+ if (lookup('role_var', '_themepark_enabled', role='sabnzbd') and global_themepark_plugin_enabled)+ else '') }}"+sabnzbd_role_traefik_middleware_custom: ""+sabnzbd_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+sabnzbd_role_traefik_enabled: true+sabnzbd_role_traefik_api_enabled: true+sabnzbd_role_traefik_api_endpoint: "PathPrefix(`/api`)" ################################ # Config ################################ -sabnzbd_config_settings_web:+sabnzbd_role_config_settings_web: # 
Web- - { option: "host_whitelist", value: "{{ sabnzbd_web_subdomain }}.{{ sabnzbd_web_domain }}, {{ sabnzbd_name }}" }+ - { option: "host_whitelist", value: "{{ lookup('role_var', '_web_subdomain', role='sabnzbd') }}.{{ lookup('role_var', '_web_domain', role='sabnzbd') }}, {{ sabnzbd_name }}" } - { option: "url_base", value: "" } - { option: "log_dir", value: "/config/logs" } -sabnzbd_config_settings_default:+sabnzbd_role_config_settings_default: # Web- - { option: "host_whitelist", value: "{{ sabnzbd_web_subdomain }}.{{ sabnzbd_web_domain }}, {{ sabnzbd_name }}" }+ - { option: "host_whitelist", value: "{{ lookup('role_var', '_web_subdomain', role='sabnzbd') }}.{{ lookup('role_var', '_web_domain', role='sabnzbd') }}, {{ sabnzbd_name }}" } - { option: "url_base", value: "" } # Paths- - { option: "dirscan_dir", value: "{{ sabnzbd_paths_downloads_location }}/watch" }- - { option: "download_dir", value: "{{ sabnzbd_paths_downloads_location }}/incomplete" }- - { option: "complete_dir", value: "{{ sabnzbd_paths_downloads_location }}/complete" }+ - { option: "dirscan_dir", value: "{{ lookup('role_var', '_paths_downloads_location', role='sabnzbd') }}/watch" }+ - { option: "download_dir", value: "{{ lookup('role_var', '_paths_downloads_location', role='sabnzbd') }}/incomplete" }+ - { option: "complete_dir", value: "{{ lookup('role_var', '_paths_downloads_location', role='sabnzbd') }}/complete" } - { option: "log_dir", value: "/config/logs" } -sabnzbd_config_settings_custom: []+sabnzbd_role_config_settings_custom: [] -sabnzbd_config_settings_list: "{{ sabnzbd_config_settings_default- + sabnzbd_config_settings_custom }}"+sabnzbd_role_config_settings_list: "{{ lookup('role_var', '_config_settings_default', role='sabnzbd')+ + lookup('role_var', '_config_settings_custom', role='sabnzbd') }}" ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-sabnzbd_themepark_enabled: 
false-sabnzbd_themepark_app: "sabnzbd"-sabnzbd_themepark_theme: "{{ global_themepark_theme }}"-sabnzbd_themepark_domain: "{{ global_themepark_domain }}"-sabnzbd_themepark_addons: []+sabnzbd_role_themepark_enabled: false+sabnzbd_role_themepark_app: "sabnzbd"+sabnzbd_role_themepark_theme: "{{ global_themepark_theme }}"+sabnzbd_role_themepark_domain: "{{ global_themepark_domain }}"+sabnzbd_role_themepark_addons: [] ################################ # Docker ################################ # Container-sabnzbd_docker_container: "{{ sabnzbd_name }}"+sabnzbd_role_docker_container: "{{ sabnzbd_name }}" # Image-sabnzbd_docker_image_pull: true-sabnzbd_docker_image_tag: "latest"-sabnzbd_docker_image: "ghcr.io/hotio/sabnzbd:{{ sabnzbd_docker_image_tag }}"--# Ports-sabnzbd_docker_ports_defaults: []-sabnzbd_docker_ports_custom: []-sabnzbd_docker_ports: "{{ sabnzbd_docker_ports_defaults- + sabnzbd_docker_ports_custom }}"+sabnzbd_role_docker_image_pull: true+sabnzbd_role_docker_image_repo: "ghcr.io/hotio/sabnzbd"+sabnzbd_role_docker_image_tag: "latest"+sabnzbd_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='sabnzbd') }}:{{ lookup('role_var', '_docker_image_tag', role='sabnzbd') }}" # Envs-sabnzbd_docker_envs_default:+sabnzbd_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-sabnzbd_docker_envs_custom: {}-sabnzbd_docker_envs: "{{ sabnzbd_docker_envs_default- | combine(sabnzbd_docker_envs_custom) }}"--# Commands-sabnzbd_docker_commands_default: []-sabnzbd_docker_commands_custom: []-sabnzbd_docker_commands: "{{ sabnzbd_docker_commands_default- + sabnzbd_docker_commands_custom }}"+sabnzbd_role_docker_envs_custom: {}+sabnzbd_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='sabnzbd')+ | combine(lookup('role_var', '_docker_envs_custom', role='sabnzbd')) }}" # Volumes-sabnzbd_docker_volumes_default:- - "{{ sabnzbd_paths_location }}:/config"+sabnzbd_role_docker_volumes_default:+ - "{{ 
sabnzbd_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-sabnzbd_docker_volumes_custom: []-sabnzbd_docker_volumes: "{{ sabnzbd_docker_volumes_default- + sabnzbd_docker_volumes_custom }}"--# Devices-sabnzbd_docker_devices_default: []-sabnzbd_docker_devices_custom: []-sabnzbd_docker_devices: "{{ sabnzbd_docker_devices_default- + sabnzbd_docker_devices_custom }}"--# Hosts-sabnzbd_docker_hosts_default: {}-sabnzbd_docker_hosts_custom: {}-sabnzbd_docker_hosts: "{{ docker_hosts_common- | combine(sabnzbd_docker_hosts_default)- | combine(sabnzbd_docker_hosts_custom) }}"+sabnzbd_role_docker_volumes_custom: []+sabnzbd_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='sabnzbd')+ + lookup('role_var', '_docker_volumes_custom', role='sabnzbd') }}" # Labels-sabnzbd_docker_labels_default: {}-sabnzbd_docker_labels_custom: {}-sabnzbd_docker_labels: "{{ docker_labels_common- | combine(sabnzbd_docker_labels_default)- | combine((traefik_themepark_labels- if (sabnzbd_themepark_enabled and global_themepark_plugin_enabled)- else {}),- sabnzbd_docker_labels_custom) }}"+sabnzbd_role_docker_labels_default: {}+sabnzbd_role_docker_labels_custom: {}+sabnzbd_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='sabnzbd')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='sabnzbd') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='sabnzbd')) }}" # Hostname-sabnzbd_docker_hostname: "{{ sabnzbd_name }}"+sabnzbd_role_docker_hostname: "{{ sabnzbd_name }}" # Networks-sabnzbd_docker_networks_alias: "{{ sabnzbd_name }}"-sabnzbd_docker_networks_default: []-sabnzbd_docker_networks_custom: []-sabnzbd_docker_networks: "{{ docker_networks_common- + sabnzbd_docker_networks_default- + sabnzbd_docker_networks_custom }}"--# Capabilities-sabnzbd_docker_capabilities_default: []-sabnzbd_docker_capabilities_custom: []-sabnzbd_docker_capabilities: "{{ 
sabnzbd_docker_capabilities_default- + sabnzbd_docker_capabilities_custom }}"--# Security Opts-sabnzbd_docker_security_opts_default: []-sabnzbd_docker_security_opts_custom: []-sabnzbd_docker_security_opts: "{{ sabnzbd_docker_security_opts_default- + sabnzbd_docker_security_opts_custom }}"+sabnzbd_role_docker_networks_alias: "{{ sabnzbd_name }}"+sabnzbd_role_docker_networks_default: []+sabnzbd_role_docker_networks_custom: []+sabnzbd_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='sabnzbd')+ + lookup('role_var', '_docker_networks_custom', role='sabnzbd') }}" # Restart Policy-sabnzbd_docker_restart_policy: unless-stopped+sabnzbd_role_docker_restart_policy: unless-stopped # State-sabnzbd_docker_state: started+sabnzbd_role_docker_state: started
modified
roles/sabnzbd/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -22,12 +22,12 @@ - name: Check if existing config exists ansible.builtin.stat:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ sabnzbd_role_paths_config_location }}" register: sabnzbd_conf_stat - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: Post-Install Tasks- ansible.builtin.import_tasks: "subtasks/post-install/main.yml"+ ansible.builtin.include_tasks: "subtasks/post-install/main.yml" when: (not continuous_integration)
modified
roles/sabnzbd/tasks/subtasks/post-install/main.yml
@@ -8,9 +8,9 @@ ######################################################################### --- - name: Post-Install | Settings Task- ansible.builtin.import_tasks: "settings.yml"+ ansible.builtin.include_tasks: "settings.yml" when: (not sabnzbd_conf_stat.stat.exists) - name: Post-Install | Web Task- ansible.builtin.import_tasks: "web.yml"+ ansible.builtin.include_tasks: "web.yml" when: sabnzbd_conf_stat.stat.exists
modified
roles/sabnzbd/tasks/subtasks/post-install/settings.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Settings | Wait for config file to be created ansible.builtin.wait_for:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ sabnzbd_role_paths_config_location }}" state: present - name: Post-Install | Settings | Stop container@@ -21,7 +21,7 @@ - name: Post-Install | Settings | Update config settings community.general.ini_file:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ sabnzbd_role_paths_config_location }}" section: misc option: "{{ item.option }}" value: "{{ item.value }}"@@ -29,7 +29,7 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ sabnzbd_config_settings_list }}"+ loop: "{{ sabnzbd_role_config_settings_list }}" - name: Post-Install | Settings | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/sabnzbd/tasks/subtasks/post-install/web.yml
@@ -9,7 +9,7 @@ --- - name: Post-Install | Web | Wait for config file to be created ansible.builtin.wait_for:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ sabnzbd_role_paths_config_location }}" state: present - name: Post-Install | Web | Stop container@@ -21,7 +21,7 @@ - name: Post-Install | Web | Update config settings community.general.ini_file:- path: "{{ sabnzbd_paths_config_location }}"+ path: "{{ sabnzbd_role_paths_config_location }}" section: misc option: "{{ item.option }}" value: "{{ item.value }}"@@ -29,7 +29,7 @@ owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"- loop: "{{ sabnzbd_config_settings_web }}"+ loop: "{{ sabnzbd_role_config_settings_web }}" - name: Post-Install | Web | Start container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_docker_container.yml"
modified
roles/saltbox_mod/tasks/main.yml
@@ -16,15 +16,14 @@ mode: "0775" recurse: true with_items:- - /opt/saltbox_mod+ - "{{ server_appdata_path }}/saltbox_mod" - name: Clone saltbox_mod repo ansible.builtin.git: repo: "{{ saltbox_mod_repo }}"- dest: /opt/saltbox_mod+ dest: "{{ server_appdata_path }}/saltbox_mod" clone: true version: "{{ saltbox_mod_branch }}" force: "{{ saltbox_mod_force_overwrite }}" become: true become_user: "{{ user.name }}"- ignore_errors: true
modified
roles/sandbox/tasks/main.yml
@@ -9,7 +9,7 @@ --- - name: Create Sandbox directory ansible.builtin.file:- path: "/opt/sandbox"+ path: "{{ server_appdata_path }}/sandbox" state: directory owner: "{{ user.name }}" group: "{{ user.name }}"@@ -18,13 +18,13 @@ - name: Check if git repository exists ansible.builtin.stat:- path: "/opt/sandbox/.git"+ path: "{{ server_appdata_path }}/sandbox/.git" register: git_repo_check - name: Clone Sandbox repo 'HEAD' ansible.builtin.git: repo: https://github.com/saltyorg/sandbox.git- dest: /opt/sandbox+ dest: "{{ server_appdata_path }}/sandbox" clone: true version: HEAD force: true@@ -37,7 +37,7 @@ - name: Clone Sandbox repo 'master' ansible.builtin.git: repo: https://github.com/saltyorg/sandbox.git- dest: /opt/sandbox+ dest: "{{ server_appdata_path }}/sandbox" clone: true version: master force: true@@ -50,15 +50,15 @@ - name: Import default files when missing ansible.builtin.copy: src: "{{ item }}"- dest: "/opt/sandbox/{{ (item | basename | splitext)[0] }}"+ dest: "{{ server_appdata_path }}/sandbox/{{ (item | basename | splitext)[0] }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664" force: false with_fileglob:- - /opt/sandbox/defaults/*.*+ - "{{ server_appdata_path }}/sandbox/defaults/*.*" - name: Activate Git Hooks- ansible.builtin.shell: bash /opt/sandbox/bin/git/init-hooks+ ansible.builtin.shell: bash {{ server_appdata_path }}/sandbox/bin/git/init-hooks args:- chdir: "/opt/sandbox"+ chdir: "{{ server_appdata_path }}/sandbox"
modified
roles/sanity_check/tasks/main.yml
@@ -8,31 +8,31 @@ ######################################################################### --- - name: System Check- ansible.builtin.import_tasks: "subtasks/01_system.yml"+ ansible.builtin.include_tasks: "subtasks/01_system.yml" - name: Ansible Version Check- ansible.builtin.import_tasks: "subtasks/02_ansible_version.yml"+ ansible.builtin.include_tasks: "subtasks/02_ansible_version.yml" - name: Python Version Check- ansible.builtin.import_tasks: "subtasks/03_python_version.yml"+ ansible.builtin.include_tasks: "subtasks/03_python_version.yml" - name: UFW Check ansible.builtin.include_tasks: "subtasks/04_ufw.yml" - name: Backup Lock Check- ansible.builtin.import_tasks: "subtasks/05_backup.yml"+ ansible.builtin.include_tasks: "subtasks/05_backup.yml" - name: Touch Logs- ansible.builtin.import_tasks: "subtasks/06_logs.yml"+ ansible.builtin.include_tasks: "subtasks/06_logs.yml" - name: Create Temp Folder- ansible.builtin.import_tasks: "subtasks/07_tmp.yml"+ ansible.builtin.include_tasks: "subtasks/07_tmp.yml" - name: Skipped Tags Check- ansible.builtin.import_tasks: "subtasks/08_skipped_tags.yml"+ ansible.builtin.include_tasks: "subtasks/08_skipped_tags.yml" - name: Repository Check- ansible.builtin.import_tasks: "subtasks/09_repo.yml"+ ansible.builtin.include_tasks: "subtasks/09_repo.yml" - name: CPU Check- ansible.builtin.import_tasks: "subtasks/10_cpu.yml"+ ansible.builtin.include_tasks: "subtasks/10_cpu.yml"
modified
roles/sanity_check/tasks/subtasks/01_system.yml
@@ -10,11 +10,11 @@ - name: System | Ensure Ansible is running on Ubuntu 20.04+ ansible.builtin.assert: that:- - ((ansible_distribution == 'Ubuntu') and (ansible_facts['distribution_version'] is version('20.04', '>=')))+ - ((ansible_facts['distribution'] == 'Ubuntu') and (ansible_facts['distribution_version'] is version('20.04', '>='))) fail_msg: >- Saltbox was designed for use on Ubuntu servers running version 20.04+. success_msg: >-- System is running {{ ansible_lsb.description if ansible_lsb is defined else (ansible_distribution + ' ' + ansible_distribution_version) }}.+ System is running {{ ansible_facts['lsb']['description'] }}. - name: System | Virtualization Check ansible.builtin.command: "systemd-detect-virt"
modified
roles/sanity_check/tasks/subtasks/03_python_version.yml
@@ -9,4 +9,4 @@ --- - name: Python Version | Print Python version ansible.builtin.debug:- msg: "Ansible running on Python version: {{ ansible_python_version }}"+ msg: "Ansible running on Python version: {{ ansible_facts['python_version'] }}"
modified
roles/sanity_check/tasks/subtasks/05_backup.yml
@@ -15,7 +15,7 @@ # Age in hours - name: Backup | Get age of '{{ sanity_check_backup_lockfile_path | basename }}' file ansible.builtin.set_fact:- backup_lock_age: "{{ ((ansible_date_time.epoch | float - backup_lock.stat.mtime) / 3600) | int }}"+ backup_lock_age: "{{ ((ansible_facts['date_time']['epoch'] | float - backup_lock.stat.mtime) / 3600) | int }}" when: backup_lock.stat.exists # Delete if older than 2 hours.
modified
roles/scripts/tasks/main.yml
@@ -9,7 +9,7 @@ --- - name: Install common packages ansible.builtin.apt:- state: present+ state: latest name: - jq - figlet@@ -26,21 +26,19 @@ mode: "0775" recurse: true with_items:- - /opt/scripts- - /opt/scripts/docker- - /opt/scripts/frontail- - /opt/scripts/nzbget- - /opt/scripts/sabnzbd- - /opt/scripts/plex- - /opt/scripts/plex_autoscan- - /opt/scripts/tautulli- - /opt/scripts/torrents- - /opt/scripts/rclone+ - "{{ server_appdata_path }}/scripts"+ - "{{ server_appdata_path }}/scripts/frontail"+ - "{{ server_appdata_path }}/scripts/nzbget"+ - "{{ server_appdata_path }}/scripts/sabnzbd"+ - "{{ server_appdata_path }}/scripts/plex"+ - "{{ server_appdata_path }}/scripts/tautulli"+ - "{{ server_appdata_path }}/scripts/torrents"+ - "{{ server_appdata_path }}/scripts/rclone" - name: Import 'arrpush.py' ansible.builtin.get_url: url: "https://raw.githubusercontent.com/l3uddz/arrpush/master/arrpush.py"- dest: "/opt/scripts/torrents/arrpush.py"+ dest: "{{ server_appdata_path }}/scripts/torrents/arrpush.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -57,7 +55,7 @@ - name: Import 'TorrentCleanup.py' ansible.builtin.get_url: url: "https://raw.githubusercontent.com/l3uddz/TorrentCleanup/master/TorrentCleanup.py"- dest: "/opt/scripts/torrents/TorrentCleanup.py"+ dest: "{{ server_appdata_path }}/scripts/torrents/TorrentCleanup.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -74,7 +72,7 @@ - name: Import 'plex_trash_fixer.py' ansible.builtin.get_url: url: "https://raw.githubusercontent.com/l3uddz/plex_trash_fixer/master/plex_trash_fixer.py"- dest: "/opt/scripts/plex/plex_trash_fixer.py"+ dest: "{{ server_appdata_path }}/scripts/plex/plex_trash_fixer.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -88,36 +86,9 @@ delay: 10 ignore_errors: true -- name: Import 'plex_autoscan_url.sh'- ansible.builtin.copy:- src: "plex_autoscan_url.sh"- dest: "/opt/scripts/plex_autoscan/plex_autoscan_url.sh"- owner: "{{ user.name }}"- 
group: "{{ user.name }}"- mode: "0775"- force: true--- name: Import 'plexsql.sh'- ansible.builtin.copy:- src: "plexsql.sh"- dest: "/opt/scripts/plex/plexsql.sh"- owner: "{{ user.name }}"- group: "{{ user.name }}"- mode: "0775"- force: true--- name: Import 'restart_containers.sh'- ansible.builtin.copy:- src: "restart_containers.sh"- dest: "/opt/scripts/docker/restart_containers.sh"- owner: "{{ user.name }}"- group: "{{ user.name }}"- mode: "0775"- force: true- - name: Check if 'frontail_custom_preset.json' exists ansible.builtin.stat:- path: "/opt/scripts/frontail/frontail_custom_preset.json"+ path: "{{ server_appdata_path }}/scripts/frontail/frontail_custom_preset.json" get_attributes: false get_checksum: false get_mime: false@@ -126,7 +97,7 @@ - name: Import 'frontail_custom_preset.json' ansible.builtin.copy: src: "frontail_custom_preset.json"- dest: "/opt/scripts/frontail/frontail_custom_preset.json"+ dest: "{{ server_appdata_path }}/scripts/frontail/frontail_custom_preset.json" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -136,7 +107,7 @@ - name: Import 'sync_torrents_to_rclone_remote.sh' ansible.builtin.template: src: "sync_torrents_to_rclone_remote.sh.j2"- dest: "/opt/scripts/rclone/sync_torrents_to_rclone_remote.sh"+ dest: "{{ server_appdata_path }}/scripts/rclone/sync_torrents_to_rclone_remote.sh" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -146,7 +117,7 @@ - name: Import 'download_torrents_from_rclone_remote.sh' ansible.builtin.template: src: "download_torrents_from_rclone_remote.sh.j2"- dest: "/opt/scripts/rclone/download_torrents_from_rclone_remote.sh"+ dest: "{{ server_appdata_path }}/scripts/rclone/download_torrents_from_rclone_remote.sh" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -156,7 +127,7 @@ - name: Import 'StickyDownloadQueue.py' ansible.builtin.get_url: url: "https://raw.githubusercontent.com/Hidendra/nzbget-sticky-download-queue/master/StickyDownloadQueue.py"- dest: 
"/opt/scripts/nzbget/StickyDownloadQueue.py"+ dest: "{{ server_appdata_path }}/scripts/nzbget/StickyDownloadQueue.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"@@ -173,7 +144,7 @@ - name: Import 'CappedDownloadQueue.py' ansible.builtin.get_url: url: "https://raw.githubusercontent.com/Hidendra/nzbget-capped-download-queue/master/CappedDownloadQueue.py"- dest: "/opt/scripts/nzbget/CappedDownloadQueue.py"+ dest: "{{ server_appdata_path }}/scripts/nzbget/CappedDownloadQueue.py" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775"
modified
roles/scripts/templates/download_torrents_from_rclone_remote.sh.j2
@@ -13,7 +13,7 @@ if ! /usr/bin/screen -list | /bin/grep -q "torrents_download"; then - /bin/rm -rfv /opt/scripts/rclone/download_torrents_from_rclone_remote.log+ /bin/rm -rfv {{ server_appdata_path }}/scripts/rclone/download_torrents_from_rclone_remote.log /usr/bin/screen -dmS torrents_download /usr/bin/rclone copy \ --user-agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36' \@@ -24,7 +24,7 @@ --checkers=16 \ --drive-chunk-size=128M \ --fast-list \- --log-file=/opt/scripts/rclone/download_torrents_from_rclone_remote.log \+ --log-file={{ server_appdata_path }}/scripts/rclone/download_torrents_from_rclone_remote.log \ {{ rclone_first_remote_name }}:/downloads/torrents {{ downloads_torrents_path }} fi
modified
roles/scripts/templates/sync_torrents_to_rclone_remote.sh.j2
@@ -12,7 +12,7 @@ if ! /usr/bin/screen -list | /bin/grep -q "torrents_sync"; then - /bin/rm -rfv /opt/scripts/rclone/sync_torrents_to_rclone_remote.log+ /bin/rm -rfv {{ server_appdata_path }}/scripts/rclone/sync_torrents_to_rclone_remote.log /usr/bin/screen -dmS torrents_sync /usr/bin/rclone sync \ --user-agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36' \@@ -23,7 +23,7 @@ --checkers=16 \ --drive-chunk-size=128M \ --fast-list \- --log-file=/opt/scripts/rclone/sync_torrents_to_rclone_remote.log \+ --log-file={{ server_appdata_path }}/scripts/rclone/sync_torrents_to_rclone_remote.log \ {{ downloads_torrents_path }} {{ rclone_first_remote_name }}:/downloads/torrents fi
modified
roles/scrutiny/defaults/main.yml
@@ -17,130 +17,87 @@ # Paths ################################ -scrutiny_paths_folder: "{{ scrutiny_name }}"-scrutiny_paths_location: "{{ server_appdata_path }}/{{ scrutiny_paths_folder }}"-scrutiny_paths_folders_list:- - "{{ scrutiny_paths_location }}"- - "{{ scrutiny_paths_location }}/scrutiny"- - "{{ scrutiny_paths_location }}/influxdb"+scrutiny_role_paths_folder: "{{ scrutiny_name }}"+scrutiny_role_paths_location: "{{ server_appdata_path }}/{{ scrutiny_role_paths_folder }}"+scrutiny_role_paths_folders_list:+ - "{{ scrutiny_role_paths_location }}"+ - "{{ scrutiny_role_paths_location }}/scrutiny"+ - "{{ scrutiny_role_paths_location }}/influxdb" ################################ # Web ################################ -scrutiny_web_subdomain: "{{ scrutiny_name }}"-scrutiny_web_domain: "{{ user.domain }}"-scrutiny_web_port: "8080"-scrutiny_web_url: "{{ 'https://' + (scrutiny_web_subdomain + '.' + scrutiny_web_domain- if (scrutiny_web_subdomain | length > 0)- else scrutiny_web_domain) }}"+scrutiny_role_web_subdomain: "{{ scrutiny_name }}"+scrutiny_role_web_domain: "{{ user.domain }}"+scrutiny_role_web_port: "8080"+scrutiny_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='scrutiny') + '.' 
+ lookup('role_var', '_web_domain', role='scrutiny')+ if (lookup('role_var', '_web_subdomain', role='scrutiny') | length > 0)+ else lookup('role_var', '_web_domain', role='scrutiny')) }}" ################################ # DNS ################################ -scrutiny_dns_record: "{{ scrutiny_web_subdomain }}"-scrutiny_dns_zone: "{{ scrutiny_web_domain }}"-scrutiny_dns_proxy: "{{ dns.proxied }}"+scrutiny_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='scrutiny') }}"+scrutiny_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='scrutiny') }}"+scrutiny_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -scrutiny_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-scrutiny_traefik_middleware_default: "{{ traefik_default_middleware }}"-scrutiny_traefik_middleware_custom: ""-scrutiny_traefik_certresolver: "{{ traefik_default_certresolver }}"-scrutiny_traefik_enabled: true+scrutiny_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+scrutiny_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+scrutiny_role_traefik_middleware_custom: ""+scrutiny_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+scrutiny_role_traefik_enabled: true ################################ # Docker ################################ # Container-scrutiny_docker_container: "{{ scrutiny_name }}"+scrutiny_role_docker_container: "{{ scrutiny_name }}" # Image-scrutiny_docker_image_pull: true-scrutiny_docker_image_tag: "master-omnibus"-scrutiny_docker_image: "ghcr.io/analogj/scrutiny:{{ scrutiny_docker_image_tag }}"--# Ports-scrutiny_docker_ports_defaults: []-scrutiny_docker_ports_custom: []-scrutiny_docker_ports: "{{ scrutiny_docker_ports_defaults- + scrutiny_docker_ports_custom }}"+scrutiny_role_docker_image_pull: true+scrutiny_role_docker_image_repo: "ghcr.io/analogj/scrutiny"+scrutiny_role_docker_image_tag: "master-omnibus"+scrutiny_role_docker_image: "{{ 
lookup('role_var', '_docker_image_repo', role='scrutiny') }}:{{ lookup('role_var', '_docker_image_tag', role='scrutiny') }}" # Envs-scrutiny_docker_envs_default:+scrutiny_role_docker_envs_default: TZ: "{{ tz }}"-scrutiny_docker_envs_custom: {}-scrutiny_docker_envs: "{{ scrutiny_docker_envs_default- | combine(scrutiny_docker_envs_custom) }}"--# Commands-scrutiny_docker_commands_default: []-scrutiny_docker_commands_custom: []-scrutiny_docker_commands: "{{ scrutiny_docker_commands_default- + scrutiny_docker_commands_custom }}"+scrutiny_role_docker_envs_custom: {}+scrutiny_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='scrutiny')+ | combine(lookup('role_var', '_docker_envs_custom', role='scrutiny')) }}" # Volumes-scrutiny_docker_volumes_default:- - "{{ scrutiny_paths_location }}/scrutiny:/opt/scrutiny/config"- - "{{ scrutiny_paths_location }}/influxdb:/opt/scrutiny/influxdb"+scrutiny_role_docker_volumes_default:+ - "{{ lookup('role_var', '_paths_location', role='scrutiny') }}/scrutiny:/opt/scrutiny/config"+ - "{{ lookup('role_var', '_paths_location', role='scrutiny') }}/influxdb:/opt/scrutiny/influxdb" - "/run/udev:/run/udev:ro"-scrutiny_docker_volumes_custom: []-scrutiny_docker_volumes: "{{ scrutiny_docker_volumes_default- + scrutiny_docker_volumes_custom }}"--# Devices-scrutiny_docker_devices_default: []-scrutiny_docker_devices_custom: []-scrutiny_docker_devices: "{{ scrutiny_docker_devices_default- + scrutiny_docker_devices_custom }}"--# Hosts-scrutiny_docker_hosts_default: {}-scrutiny_docker_hosts_custom: {}-scrutiny_docker_hosts: "{{ docker_hosts_common- | combine(scrutiny_docker_hosts_default)- | combine(scrutiny_docker_hosts_custom) }}"--# Labels-scrutiny_docker_labels_default: {}-scrutiny_docker_labels_custom: {}-scrutiny_docker_labels: "{{ docker_labels_common- | combine(scrutiny_docker_labels_default)- | combine(scrutiny_docker_labels_custom) }}"+scrutiny_role_docker_volumes_custom: []+scrutiny_role_docker_volumes: "{{ 
lookup('role_var', '_docker_volumes_default', role='scrutiny')+ + lookup('role_var', '_docker_volumes_custom', role='scrutiny') }}" # Hostname-scrutiny_docker_hostname: "{{ scrutiny_name }}"+scrutiny_role_docker_hostname: "{{ scrutiny_name }}" # Networks-scrutiny_docker_networks_alias: "{{ scrutiny_name }}"-scrutiny_docker_networks_default: []-scrutiny_docker_networks_custom: []-scrutiny_docker_networks: "{{ docker_networks_common- + scrutiny_docker_networks_default- + scrutiny_docker_networks_custom }}"--# Capabilities-scrutiny_docker_capabilities_default: []-scrutiny_docker_capabilities_custom: []-scrutiny_docker_capabilities: "{{ scrutiny_docker_capabilities_default- + scrutiny_docker_capabilities_custom }}"--# Security Opts-scrutiny_docker_security_opts_default: []-scrutiny_docker_security_opts_custom: []-scrutiny_docker_security_opts: "{{ scrutiny_docker_security_opts_default- + scrutiny_docker_security_opts_custom }}"+scrutiny_role_docker_networks_alias: "{{ scrutiny_name }}"+scrutiny_role_docker_networks_default: []+scrutiny_role_docker_networks_custom: []+scrutiny_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='scrutiny')+ + lookup('role_var', '_docker_networks_custom', role='scrutiny') }}" # Restart Policy-scrutiny_docker_restart_policy: unless-stopped+scrutiny_role_docker_restart_policy: unless-stopped # State-scrutiny_docker_state: started+scrutiny_role_docker_state: started # Privileged-scrutiny_docker_privileged: true+scrutiny_role_docker_privileged: true
modified
roles/scrutiny/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/shell/defaults/main.yml
@@ -26,7 +26,7 @@ ################################ shell_z_git_repo_url: "https://github.com/rupa/z.git"-shell_z_git_repo_dest: "/opt/z"+shell_z_git_repo_dest: "{{ server_appdata_path }}/z" ################################ # Bash@@ -64,7 +64,7 @@ DISABLE_UPDATE_PROMPT=true shell_zsh_zshrc_block_content2: |- # zsh - allows commmands to run with the un-expanded glob+ # zsh - allows commands to run with the un-expanded glob unsetopt nomatch # zsh - set TIMEFMT export TIMEFMT=$'
modified
roles/shell/tasks/subtasks/misc.yml
@@ -9,13 +9,13 @@ --- - name: Misc | Install argcomplete ansible.builtin.shell: "pip install {{ shell_misc_argcomplete_pip_package }}"- when: ansible_distribution_version is version('22.04', '<=')+ when: ansible_facts['distribution_version'] is version('22.04', '<=') - name: Misc | Install argcomplete ansible.builtin.apt: name: python3-argcomplete state: present- when: ansible_distribution_version is version('24.04', '>=')+ when: ansible_facts['distribution_version'] is version('24.04', '>=') # For Ansible command line utilities - name: Misc | Activate argcomplete
modified
roles/shell/tasks/subtasks/shell/zsh.yml
@@ -10,7 +10,7 @@ - name: ZSH | Install 'zsh' ansible.builtin.apt: name: "{{ shell_zsh_apt_packages_list }}"- state: present+ state: latest - name: ZSH | Clone 'oh-my-zsh' repo ansible.builtin.git:@@ -21,8 +21,6 @@ force: true become: true become_user: "{{ user.name }}"- register: cloning- ignore_errors: true - name: ZSH | Check if an existing '.zshrc' file is present ansible.builtin.stat:@@ -36,7 +34,7 @@ group: "{{ user.name }}" owner: "{{ user.name }}" mode: "0664"- when: (cloning is success) and (not existing_zshrc.stat.exists)+ when: (not existing_zshrc.stat.exists) - name: ZSH | Add items to '{{ shell_zsh_zshrc_path }}' ansible.builtin.blockinfile:
modified
roles/shell/tasks/subtasks/z.yml
@@ -25,10 +25,3 @@ force: true become: true become_user: "{{ user.name }}"- register: shell_z_git_clone_status- ignore_errors: true--- name: z | 'z' Installed- ansible.builtin.debug:- msg: "'z (jump around)' Installed"- when: (shell_z_git_clone_status is succeeded)
modified
roles/sonarr/defaults/main.yml
@@ -17,171 +17,121 @@ # Settings ################################ -sonarr_external_auth: true+sonarr_role_external_auth: true ################################ # Paths ################################ -sonarr_paths_folder: "{{ sonarr_name }}"-sonarr_paths_location: "{{ server_appdata_path }}/{{ sonarr_paths_folder }}"-sonarr_paths_folders_list:- - "{{ sonarr_paths_location }}"-sonarr_paths_config_location: "{{ sonarr_paths_location }}/config.xml"+sonarr_role_paths_folder: "{{ sonarr_name }}"+sonarr_role_paths_location: "{{ server_appdata_path }}/{{ sonarr_role_paths_folder }}"+sonarr_role_paths_folders_list:+ - "{{ sonarr_role_paths_location }}"+sonarr_role_paths_config_location: "{{ sonarr_role_paths_location }}/config.xml" ################################ # Web ################################ -sonarr_web_subdomain: "{{ sonarr_name }}"-sonarr_web_domain: "{{ user.domain }}"-sonarr_web_port: "8989"-sonarr_web_url: "{{ 'https://' + (lookup('vars', sonarr_name + '_web_subdomain', default=sonarr_web_subdomain) + '.' + lookup('vars', sonarr_name + '_web_domain', default=sonarr_web_domain)- if (lookup('vars', sonarr_name + '_web_subdomain', default=sonarr_web_subdomain) | length > 0)- else lookup('vars', sonarr_name + '_web_domain', default=sonarr_web_domain)) }}"+sonarr_role_web_subdomain: "{{ sonarr_name }}"+sonarr_role_web_domain: "{{ user.domain }}"+sonarr_role_web_port: "8989"+sonarr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='sonarr') + '.' 
+ lookup('role_var', '_web_domain', role='sonarr')+ if (lookup('role_var', '_web_subdomain', role='sonarr') | length > 0)+ else lookup('role_var', '_web_domain', role='sonarr')) }}" ################################ # DNS ################################ -sonarr_dns_record: "{{ lookup('vars', sonarr_name + '_web_subdomain', default=sonarr_web_subdomain) }}"-sonarr_dns_zone: "{{ lookup('vars', sonarr_name + '_web_domain', default=sonarr_web_domain) }}"-sonarr_dns_proxy: "{{ dns.proxied }}"+sonarr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='sonarr') }}"+sonarr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='sonarr') }}"+sonarr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -sonarr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-sonarr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', sonarr_name + '_name', default=sonarr_name)- if (sonarr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-sonarr_traefik_middleware_custom: ""-sonarr_traefik_certresolver: "{{ traefik_default_certresolver }}"-sonarr_traefik_enabled: true-sonarr_traefik_api_enabled: true-sonarr_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)"+sonarr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+sonarr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + sonarr_name+ if (lookup('role_var', '_themepark_enabled', role='sonarr') and global_themepark_plugin_enabled)+ else '') }}"+sonarr_role_traefik_middleware_custom: ""+sonarr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+sonarr_role_traefik_enabled: true+sonarr_role_traefik_api_enabled: true+sonarr_role_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/feed`) || PathPrefix(`/ping`)" ################################-# API-################################--# default to 
blank-sonarr_api_key:--################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-sonarr_themepark_enabled: false-sonarr_themepark_app: "sonarr"-sonarr_themepark_theme: "{{ global_themepark_theme }}"-sonarr_themepark_domain: "{{ global_themepark_domain }}"-sonarr_themepark_addons: []+sonarr_role_themepark_enabled: false+sonarr_role_themepark_app: "sonarr"+sonarr_role_themepark_theme: "{{ global_themepark_theme }}"+sonarr_role_themepark_domain: "{{ global_themepark_domain }}"+sonarr_role_themepark_addons: [] ################################ # Docker ################################ # Container-sonarr_docker_container: "{{ sonarr_name }}"+sonarr_role_docker_container: "{{ sonarr_name }}" # Image-sonarr_docker_image_pull: true-sonarr_docker_image_repo: "ghcr.io/hotio/sonarr"-sonarr_docker_image_tag: "release"-sonarr_docker_image: "{{ lookup('vars', sonarr_name + '_docker_image_repo', default=sonarr_docker_image_repo)- + ':' + lookup('vars', sonarr_name + '_docker_image_tag', default=sonarr_docker_image_tag) }}"--# Ports-sonarr_docker_ports_defaults: []-sonarr_docker_ports_custom: []-sonarr_docker_ports: "{{ lookup('vars', sonarr_name + '_docker_ports_defaults', default=sonarr_docker_ports_defaults)- + lookup('vars', sonarr_name + '_docker_ports_custom', default=sonarr_docker_ports_custom) }}"+sonarr_role_docker_image_pull: true+sonarr_role_docker_image_repo: "ghcr.io/hotio/sonarr"+sonarr_role_docker_image_tag: "release"+sonarr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='sonarr') }}:{{ lookup('role_var', '_docker_image_tag', role='sonarr') }}" # Envs-sonarr_docker_envs_default:+sonarr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-sonarr_docker_envs_custom: {}-sonarr_docker_envs: "{{ lookup('vars', sonarr_name + '_docker_envs_default', default=sonarr_docker_envs_default)- | combine(lookup('vars', sonarr_name + 
'_docker_envs_custom', default=sonarr_docker_envs_custom)) }}"--# Commands-sonarr_docker_commands_default: []-sonarr_docker_commands_custom: []-sonarr_docker_commands: "{{ lookup('vars', sonarr_name + '_docker_commands_default', default=sonarr_docker_commands_default)- + lookup('vars', sonarr_name + '_docker_commands_custom', default=sonarr_docker_commands_custom) }}"+sonarr_role_docker_envs_custom: {}+sonarr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='sonarr')+ | combine(lookup('role_var', '_docker_envs_custom', role='sonarr')) }}" # Volumes-sonarr_docker_volumes_default:- - "{{ sonarr_paths_location }}:/config"+sonarr_role_docker_volumes_default:+ - "{{ sonarr_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-sonarr_docker_volumes_legacy:+sonarr_role_docker_volumes_legacy: - "/mnt/unionfs/Media/TV:/tv"-sonarr_docker_volumes_custom: []-sonarr_docker_volumes: "{{ lookup('vars', sonarr_name + '_docker_volumes_default', default=sonarr_docker_volumes_default)- + lookup('vars', sonarr_name + '_docker_volumes_custom', default=sonarr_docker_volumes_custom)- + (lookup('vars', sonarr_name + '_docker_volumes_legacy', default=sonarr_docker_volumes_legacy)- if docker_legacy_volume- else []) }}"--# Devices-sonarr_docker_devices_default: []-sonarr_docker_devices_custom: []-sonarr_docker_devices: "{{ lookup('vars', sonarr_name + '_docker_devices_default', default=sonarr_docker_devices_default)- + lookup('vars', sonarr_name + '_docker_devices_custom', default=sonarr_docker_devices_custom) }}"--# Hosts-sonarr_docker_hosts_default: {}-sonarr_docker_hosts_custom: {}-sonarr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', sonarr_name + '_docker_hosts_default', default=sonarr_docker_hosts_default))- | combine(lookup('vars', sonarr_name + '_docker_hosts_custom', default=sonarr_docker_hosts_custom)) }}"+sonarr_role_docker_volumes_custom: []+sonarr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', 
role='sonarr')+ + lookup('role_var', '_docker_volumes_custom', role='sonarr')+ + (lookup('role_var', '_docker_volumes_legacy', role='sonarr')+ if docker_legacy_volume+ else []) }}" # Labels-sonarr_docker_labels_default: {}-sonarr_docker_labels_custom: {}-sonarr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', sonarr_name + '_docker_labels_default', default=sonarr_docker_labels_default))- | combine((traefik_themepark_labels- if (sonarr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', sonarr_name + '_docker_labels_custom', default=sonarr_docker_labels_custom)) }}"+sonarr_role_docker_labels_default: {}+sonarr_role_docker_labels_custom: {}+sonarr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='sonarr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='sonarr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='sonarr')) }}" # Hostname-sonarr_docker_hostname: "{{ sonarr_name }}"--# Network Mode-sonarr_docker_network_mode_default: "{{ docker_networks_name_common }}"-sonarr_docker_network_mode: "{{ lookup('vars', sonarr_name + '_docker_network_mode_default', default=sonarr_docker_network_mode_default) }}"+sonarr_role_docker_hostname: "{{ sonarr_name }}" # Networks-sonarr_docker_networks_alias: "{{ sonarr_name }}"-sonarr_docker_networks_default: []-sonarr_docker_networks_custom: []-sonarr_docker_networks: "{{ docker_networks_common- + lookup('vars', sonarr_name + '_docker_networks_default', default=sonarr_docker_networks_default)- + lookup('vars', sonarr_name + '_docker_networks_custom', default=sonarr_docker_networks_custom) }}"--# Capabilities-sonarr_docker_capabilities_default: []-sonarr_docker_capabilities_custom: []-sonarr_docker_capabilities: "{{ lookup('vars', sonarr_name + '_docker_capabilities_default', default=sonarr_docker_capabilities_default)- + lookup('vars', sonarr_name + 
'_docker_capabilities_custom', default=sonarr_docker_capabilities_custom) }}"--# Security Opts-sonarr_docker_security_opts_default: []-sonarr_docker_security_opts_custom: []-sonarr_docker_security_opts: "{{ lookup('vars', sonarr_name + '_docker_security_opts_default', default=sonarr_docker_security_opts_default)- + lookup('vars', sonarr_name + '_docker_security_opts_custom', default=sonarr_docker_security_opts_custom) }}"+sonarr_role_docker_networks_alias: "{{ sonarr_name }}"+sonarr_role_docker_networks_default: []+sonarr_role_docker_networks_custom: []+sonarr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='sonarr')+ + lookup('role_var', '_docker_networks_custom', role='sonarr') }}" # Restart Policy-sonarr_docker_restart_policy: unless-stopped+sonarr_role_docker_restart_policy: unless-stopped # State-sonarr_docker_state: started+sonarr_role_docker_state: started
modified
roles/sonarr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -24,5 +24,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (lookup('vars', sonarr_name + '_traefik_sso_middleware', default=sonarr_traefik_sso_middleware) | length > 0) and lookup('vars', sonarr_name + '_external_auth', default=sonarr_external_auth)+ ansible.builtin.include_tasks: "subtasks/auth.yml"+ when: (lookup('role_var', '_traefik_sso_middleware', role='sonarr') | length > 0) and lookup('role_var', '_external_auth', role='sonarr')
modified
roles/sonarr/tasks/subtasks/auth.yml
@@ -9,7 +9,7 @@ --- - name: Auth | Wait for 'config.xml' to be created ansible.builtin.wait_for:- path: "/opt/{{ sonarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ sonarr_name }}/config.xml" state: present - name: Auth | Wait for 10 seconds@@ -18,7 +18,7 @@ - name: Auth | Lookup AuthenticationMethod value community.general.xml:- path: "/opt/{{ sonarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ sonarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" content: "text" register: xmlresp@@ -28,7 +28,7 @@ block: - name: Auth | Change the 'AuthenticationMethod' attribute to 'External' community.general.xml:- path: "/opt/{{ sonarr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ sonarr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" value: "External"
modified
roles/system/tasks/main.yml
@@ -8,23 +8,23 @@ ######################################################################### --- - name: Logrotate- ansible.builtin.import_tasks: "subtasks/logrotate.yml"+ ansible.builtin.include_tasks: "subtasks/logrotate.yml" - name: APT tasks ansible.builtin.include_tasks: "subtasks/apt.yml" - name: Network tasks- ansible.builtin.import_tasks: "subtasks/network.yml"+ ansible.builtin.include_tasks: "subtasks/network.yml" - name: sysctl tasks ansible.builtin.include_tasks: "subtasks/sysctl.yml" when: run_sysctl_tasks -- name: pam_limits tasks+- name: PAM limits tasks ansible.builtin.include_tasks: "subtasks/pam_limits.yml" - name: Mounts tasks- ansible.builtin.import_tasks: "subtasks/mounts.yml"+ ansible.builtin.include_tasks: "subtasks/mounts.yml" - name: CPU Frequency tasks ansible.builtin.include_tasks: "subtasks/cpufrequency.yml"@@ -35,12 +35,12 @@ when: cpu_performance_mode and ("none" in systemd_detect_virt.stdout) - name: Set Time Zone task- ansible.builtin.import_tasks: "subtasks/timezone.yml"+ ansible.builtin.include_tasks: "subtasks/timezone.yml" when: (tz is defined) tags: set-timezone - name: Set Locale task- ansible.builtin.import_tasks: "subtasks/locale.yml"+ ansible.builtin.include_tasks: "subtasks/locale.yml" tags: set-locale - name: flush_handlers
modified
roles/system/tasks/subtasks/apt.yml
@@ -23,7 +23,7 @@ dest: "/etc/needrestart/conf.d/saltbox.conf" mode: "0644" force: true- when: ('needrestart' in ansible_facts.packages)+ when: ('needrestart' in ansible_facts['packages']) - name: APT | Fix any potential dpkg issues block:@@ -48,7 +48,7 @@ - name: APT | Install required packages ansible.builtin.apt:- state: present+ state: latest name: - apt-utils - byobu
modified
roles/system/tasks/subtasks/cpufrequency.yml
@@ -10,18 +10,18 @@ - name: CPU Frequency | Install 'cpufrequtils' ansible.builtin.apt: name: cpufrequtils- state: present+ state: latest - name: CPU Frequency | Install 'linux-tools' for Ubuntu ansible.builtin.apt:- state: present+ state: latest name: - linux-tools-common - linux-tools-generic- when: (ansible_distribution == 'Ubuntu')+ when: (ansible_facts['distribution'] == 'Ubuntu') -- name: "CPU Frequency | Install 'linux-tools-{{ ansible_kernel }}'"- ansible.builtin.shell: "apt-get install -qq $(apt-cache search -n linux-tools-{{ ansible_kernel }} | awk '{print $1}' | tail -n 1)"+- name: "CPU Frequency | Install 'linux-tools-{{ ansible_facts['kernel'] }}'"+ ansible.builtin.shell: "apt-get install -qq $(apt-cache search -n linux-tools-{{ ansible_facts['kernel'] }} | awk '{print $1}' | tail -n 1)" ignore_errors: true - name: CPU Frequency | Ensure 'cpufrequtils' is enabled@@ -68,4 +68,4 @@ enabled: false daemon_reload: true ignore_errors: true- when: ('ondemand.service' in services and services['ondemand.service']['status'] == 'enabled')+ when: ('ondemand.service' in services and ansible_facts['services']['ondemand.service']['status'] == 'enabled')
modified
roles/system/tasks/subtasks/locale.yml
@@ -10,14 +10,14 @@ - name: Locale | Install locales ansible.builtin.apt: name: locales- state: present+ state: latest -- name: Locale | Ensure localisation files for '{{ system_locale }}' are available+- name: Locale | Ensure localization files for '{{ system_locale }}' are available community.general.locale_gen: name: "{{ system_locale }}" state: present -- name: Locale | Ensure localisation files for '{{ system_language }}' are available+- name: Locale | Ensure localization files for '{{ system_language }}' are available community.general.locale_gen: name: "{{ system_language }}" state: present@@ -25,7 +25,6 @@ - name: Locale | Get current locale and language configuration ansible.builtin.command: localectl status register: locale_status- changed_when: false - name: Locale | Parse 'LANG' from current locale and language configuration ansible.builtin.set_fact:
modified
roles/system/tasks/subtasks/logrotate.yml
@@ -11,7 +11,7 @@ - name: Logrotate | Install required packages ansible.builtin.apt:- state: present+ state: latest name: - logrotate @@ -39,7 +39,7 @@ } - path: "sandbox" content: |- /opt/sandbox/sandbox.log {+ {{ server_appdata_path }}/sandbox/sandbox.log { su {{ user.name }} {{ user.name }} weekly rotate 5@@ -50,7 +50,7 @@ } - path: "saltbox_mod" content: |- /opt/saltbox_mod/saltbox_mod.log {+ {{ server_appdata_path }}/saltbox_mod/saltbox_mod.log { su {{ user.name }} {{ user.name }} weekly rotate 5@@ -61,10 +61,10 @@ } - path: "traefik" content: |- /opt/traefik/access.log {+ {{ server_appdata_path }}/traefik/access.log { su {{ user.name }} {{ user.name }}- rotate {{ traefik_log_max_backups }}- size {{ traefik_log_max_size }}+ rotate {{ lookup('role_var', '_log_max_backups', role='traefik') }}+ size {{ lookup('role_var', '_log_max_size', role='traefik') }} missingok notifempty postrotate@@ -78,6 +78,6 @@ path: "/etc/logrotate.d/{{ item.path }}" marker: "### SALTBOX MANAGED BLOCK - {mark} ###" block: "{{ item.content }}"- create: yes+ create: true mode: "0644" loop: "{{ logrotate_d_items }}"
modified
roles/system/tasks/subtasks/mounts.yml
@@ -9,7 +9,7 @@ --- - name: Mounts | Display system mounts ansible.builtin.debug:- msg: "{{ ansible_mounts }}"+ msg: "{{ ansible_facts['mounts'] }}" verbosity: 1 - name: Mounts | Set opts for '/' ext4 mount@@ -20,13 +20,13 @@ fstype: ext4 src: "{{ item.device }}" with_items:- - "{{ ansible_mounts }}"+ - "{{ ansible_facts['mounts'] }}" when: (item.mount == '/') and (item.fstype == 'ext4') - name: "Mounts | Install 'fuse3'" ansible.builtin.apt: name: "fuse3"- state: present+ state: latest - name: Mounts | Import 'fuse.conf' ansible.builtin.copy:
modified
roles/system/tasks/subtasks/network.yml
@@ -9,14 +9,14 @@ --- - name: Network | Install common packages ansible.builtin.apt:- state: present+ state: latest name: - vnstat - pciutils - ethtool - name: Network | Network Tasks Block- when: (ansible_default_ipv4 is defined) and (ansible_default_ipv4.type == "ether")+ when: (ansible_facts['default_ipv4'] is defined) and (ansible_facts['default_ipv4']['type'] == "ether") block: - name: Network | Check for '/etc/vnstat.conf' ansible.builtin.stat:@@ -27,7 +27,7 @@ ansible.builtin.lineinfile: path: "/etc/vnstat.conf" regexp: '(Interface)\s?.*'- line: '\1 "{{ ansible_default_ipv4.interface }}"'+ line: '\1 "{{ ansible_facts["default_ipv4"]["interface"] }}"' state: present backrefs: true when: (vnstat_conf.stat.exists)@@ -52,7 +52,7 @@ create: false marker: "### {mark} SALTBOX MANAGED BLOCK ###" block: |- ethtool -K {{ ansible_default_ipv4.interface }} tso off gso off+ ethtool -K {{ ansible_facts['default_ipv4']['interface'] }} tso off gso off insertbefore: "^exit 0" owner: "root" group: "root"
modified
roles/system/tasks/subtasks/timezone.yml
@@ -23,29 +23,50 @@ - (system.timezone is defined) - ('auto' in system.timezone | lower) block:- - name: Time Zone | Get IP geolocation data- community.general.ipinfoio_facts:+ - name: Time Zone | Get timezone from IP using multiple sources+ ip_timezone_lookup:+ ip_address: "{{ ip_address_public }}"+ timeout: 5+ min_consensus: 2+ register: tz_lookup - - name: Time Zone | Set 'timezone' variable from 'ipinfoio_facts'+ - name: Time Zone | Set 'timezone' variable from IP lookup ansible.builtin.set_fact:- timezone: "{{ ansible_facts.timezone }}"- timezone_string: "Time zone: {{ timezone }}"+ timezone: "{{ tz_lookup.timezone }}"+ when:+ - tz_lookup.timezone is defined+ - tz_lookup.timezone is not none+ - tz_lookup.confidence in ['high', 'medium']++ - name: Time Zone | Display timezone detection results+ ansible.builtin.debug:+ msg: "Detected timezone '{{ tz_lookup.timezone }}' with {{ tz_lookup.confidence }} confidence ({{ tz_lookup.consensus_count }}/{{ tz_lookup.successful_lookups }} sources agree)"+ when: tz_lookup.timezone is defined++ - name: Time Zone | Fail if low confidence or no consensus+ ansible.builtin.fail:+ msg: "Could not reliably determine timezone. Only {{ tz_lookup.consensus_count }} sources agreed. 
Manual configuration may be required."+ when:+ - tz_lookup.timezone is none or tz_lookup.confidence == 'low' rescue:- - name: Time Zone | Get IP geolocation data (with curl)+ - name: Time Zone | Fallback to curl method if module fails ansible.builtin.shell: "curl -s https://ipapi.co/timezone" register: system_curl_timezone failed_when: (system_curl_timezone.stdout | trim | length == 0) - - name: Time Zone | Set 'timezone' variable from 'ipinfoio_facts'+ - name: Time Zone | Set 'timezone' variable from curl fallback ansible.builtin.set_fact: timezone: "{{ system_curl_timezone.stdout | trim }}"- timezone_string: "Time zone: {{ timezone }}" - name: Time Zone | Check if timezone is valid ansible.builtin.fail:- msg: "Invalid timezone given, given timezone was {{ timezone }}\"."- when: (timezone_list.stdout is defined) and timezone not in timezone_list.stdout+ msg: "Invalid timezone given, given timezone was {{ timezone }}."+ when: (timezone_list.stdout_lines is defined) and timezone not in timezone_list.stdout_lines++- name: Time Zone | Set 'timezone_string' variable+ ansible.builtin.set_fact:+ timezone_string: "Time zone: {{ timezone }}" - name: Time Zone | Set timezone ansible.builtin.shell: "timedatectl set-timezone {{ timezone }}; timedatectl"
modified
roles/tautulli/defaults/main.yml
@@ -17,155 +17,112 @@ # Paths ################################ -tautulli_paths_folder: "{{ tautulli_name }}"-tautulli_paths_location: "{{ server_appdata_path }}/{{ tautulli_paths_folder }}"-tautulli_paths_scripts_location: "{{ server_appdata_path }}/scripts/tautulli"-tautulli_paths_folders_list:- - "{{ tautulli_paths_location }}"- - "{{ tautulli_paths_scripts_location }}"+tautulli_role_paths_folder: "{{ tautulli_name }}"+tautulli_role_paths_location: "{{ server_appdata_path }}/{{ tautulli_role_paths_folder }}"+tautulli_role_paths_scripts_location: "{{ server_appdata_path }}/scripts/tautulli"+tautulli_role_paths_folders_list:+ - "{{ tautulli_role_paths_location }}"+ - "{{ tautulli_role_paths_scripts_location }}" ################################ # Web ################################ -tautulli_web_subdomain: "{{ tautulli_name }}"-tautulli_web_domain: "{{ user.domain }}"-tautulli_web_port: "8181"-tautulli_web_url: "{{ 'https://' + (lookup('vars', tautulli_name + '_web_subdomain', default=tautulli_web_subdomain) + '.' + lookup('vars', tautulli_name + '_web_domain', default=tautulli_web_domain)- if (lookup('vars', tautulli_name + '_web_subdomain', default=tautulli_web_subdomain) | length > 0)- else lookup('vars', tautulli_name + '_web_domain', default=tautulli_web_domain)) }}"+tautulli_role_web_subdomain: "{{ tautulli_name }}"+tautulli_role_web_domain: "{{ user.domain }}"+tautulli_role_web_port: "8181"+tautulli_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='tautulli') + '.' 
+ lookup('role_var', '_web_domain', role='tautulli')+ if (lookup('role_var', '_web_subdomain', role='tautulli') | length > 0)+ else lookup('role_var', '_web_domain', role='tautulli')) }}" ################################ # DNS ################################ -tautulli_dns_record: "{{ lookup('vars', tautulli_name + '_web_subdomain', default=tautulli_web_subdomain) }}"-tautulli_dns_zone: "{{ lookup('vars', tautulli_name + '_web_domain', default=tautulli_web_domain) }}"-tautulli_dns_proxy: "{{ dns.proxied }}"+tautulli_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='tautulli') }}"+tautulli_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='tautulli') }}"+tautulli_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -tautulli_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-tautulli_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', tautulli_name + '_name', default=tautulli_name)- if (tautulli_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-tautulli_traefik_middleware_custom: ""-tautulli_traefik_certresolver: "{{ traefik_default_certresolver }}"-tautulli_traefik_enabled: true-tautulli_traefik_api_enabled: true-tautulli_traefik_api_endpoint: "PathPrefix(`/api`) || PathPrefix(`/newsletter`) || PathPrefix(`/image`) || PathPrefix(`/pms_image_proxy`)"-tautulli_traefik_gzip_enabled: false+tautulli_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+tautulli_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + tautulli_name+ if (lookup('role_var', '_themepark_enabled', role='tautulli') and global_themepark_plugin_enabled)+ else '') }}"+tautulli_role_traefik_middleware_custom: ""+tautulli_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+tautulli_role_traefik_enabled: true+tautulli_role_traefik_api_enabled: true+tautulli_role_traefik_api_endpoint: 
"PathPrefix(`/api`) || PathPrefix(`/newsletter`) || PathPrefix(`/image`) || PathPrefix(`/pms_image_proxy`)"+tautulli_role_traefik_gzip_enabled: false ################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-tautulli_themepark_enabled: false-tautulli_themepark_app: "tautulli"-tautulli_themepark_theme: "{{ global_themepark_theme }}"-tautulli_themepark_domain: "{{ global_themepark_domain }}"-tautulli_themepark_addons: []+tautulli_role_themepark_enabled: false+tautulli_role_themepark_app: "tautulli"+tautulli_role_themepark_theme: "{{ global_themepark_theme }}"+tautulli_role_themepark_domain: "{{ global_themepark_domain }}"+tautulli_role_themepark_addons: [] ################################ # Docker ################################ # Container-tautulli_docker_container: "{{ tautulli_name }}"+tautulli_role_docker_container: "{{ tautulli_name }}" # Image-tautulli_docker_image_pull: true-tautulli_docker_image_repo: "ghcr.io/hotio/tautulli"-tautulli_docker_image_tag: "release"-tautulli_docker_image: "{{ lookup('vars', tautulli_name + '_docker_image_repo', default=tautulli_docker_image_repo)- + ':' + lookup('vars', tautulli_name + '_docker_image_tag', default=tautulli_docker_image_tag) }}"--# Ports-tautulli_docker_ports_defaults: []-tautulli_docker_ports_custom: []-tautulli_docker_ports: "{{ lookup('vars', tautulli_name + '_docker_ports_defaults', default=tautulli_docker_ports_defaults)- + lookup('vars', tautulli_name + '_docker_ports_custom', default=tautulli_docker_ports_custom) }}"+tautulli_role_docker_image_pull: true+tautulli_role_docker_image_repo: "ghcr.io/hotio/tautulli"+tautulli_role_docker_image_tag: "release"+tautulli_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='tautulli') }}:{{ lookup('role_var', '_docker_image_tag', role='tautulli') }}" # Envs-tautulli_docker_envs_default:+tautulli_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" 
UMASK: "002" TZ: "{{ tz }}"-tautulli_docker_envs_custom: {}-tautulli_docker_envs: "{{ lookup('vars', tautulli_name + '_docker_envs_default', default=tautulli_docker_envs_default)- | combine(lookup('vars', tautulli_name + '_docker_envs_custom', default=tautulli_docker_envs_custom)) }}"--# Commands-tautulli_docker_commands_default: []-tautulli_docker_commands_custom: []-tautulli_docker_commands: "{{ lookup('vars', tautulli_name + '_docker_commands_default', default=tautulli_docker_commands_default)- + lookup('vars', tautulli_name + '_docker_commands_custom', default=tautulli_docker_commands_custom) }}"+tautulli_role_docker_envs_custom: {}+tautulli_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='tautulli')+ | combine(lookup('role_var', '_docker_envs_custom', role='tautulli')) }}" # Volumes-tautulli_docker_volumes_default:- - "{{ tautulli_paths_location }}:/config"+tautulli_role_docker_volumes_default:+ - "{{ tautulli_role_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-tautulli_docker_volumes_custom: []-tautulli_docker_volumes: "{{ lookup('vars', tautulli_name + '_docker_volumes_default', default=tautulli_docker_volumes_default)- + lookup('vars', tautulli_name + '_docker_volumes_custom', default=tautulli_docker_volumes_custom) }}"--# Devices-tautulli_docker_devices_default: []-tautulli_docker_devices_custom: []-tautulli_docker_devices: "{{ lookup('vars', tautulli_name + '_docker_devices_default', default=tautulli_docker_devices_default)- + lookup('vars', tautulli_name + '_docker_devices_custom', default=tautulli_docker_devices_custom) }}"--# Hosts-tautulli_docker_hosts_default: {}-tautulli_docker_hosts_custom: {}-tautulli_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', tautulli_name + '_docker_hosts_default', default=tautulli_docker_hosts_default))- | combine(lookup('vars', tautulli_name + '_docker_hosts_custom', default=tautulli_docker_hosts_custom)) }}"+tautulli_role_docker_volumes_custom: 
[]+tautulli_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='tautulli')+ + lookup('role_var', '_docker_volumes_custom', role='tautulli') }}" # Labels-tautulli_docker_labels_default: {}-tautulli_docker_labels_custom: {}-tautulli_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', tautulli_name + '_docker_labels_default', default=tautulli_docker_labels_default))- | combine((traefik_themepark_labels- if (tautulli_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', tautulli_name + '_docker_labels_custom', default=tautulli_docker_labels_custom)) }}"+tautulli_role_docker_labels_default: {}+tautulli_role_docker_labels_custom: {}+tautulli_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='tautulli')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='tautulli') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='tautulli')) }}" # Hostname-tautulli_docker_hostname: "{{ tautulli_name }}"--# Network Mode-tautulli_docker_network_mode_default: "{{ docker_networks_name_common }}"-tautulli_docker_network_mode: "{{ lookup('vars', tautulli_name + '_docker_network_mode_default', default=tautulli_docker_network_mode_default) }}"+tautulli_role_docker_hostname: "{{ tautulli_name }}" # Networks-tautulli_docker_networks_alias: "{{ tautulli_name }}"-tautulli_docker_networks_default: []-tautulli_docker_networks_custom: []-tautulli_docker_networks: "{{ docker_networks_common- + lookup('vars', tautulli_name + '_docker_networks_default', default=tautulli_docker_networks_default)- + lookup('vars', tautulli_name + '_docker_networks_custom', default=tautulli_docker_networks_custom) }}"--# Capabilities-tautulli_docker_capabilities_default: []-tautulli_docker_capabilities_custom: []-tautulli_docker_capabilities: "{{ lookup('vars', tautulli_name + '_docker_capabilities_default', 
default=tautulli_docker_capabilities_default)- + lookup('vars', tautulli_name + '_docker_capabilities_custom', default=tautulli_docker_capabilities_custom) }}"--# Security Opts-tautulli_docker_security_opts_default: []-tautulli_docker_security_opts_custom: []-tautulli_docker_security_opts: "{{ lookup('vars', tautulli_name + '_docker_security_opts_default', default=tautulli_docker_security_opts_default)- + lookup('vars', tautulli_name + '_docker_security_opts_custom', default=tautulli_docker_security_opts_custom) }}"+tautulli_role_docker_networks_alias: "{{ tautulli_name }}"+tautulli_role_docker_networks_default: []+tautulli_role_docker_networks_custom: []+tautulli_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='tautulli')+ + lookup('role_var', '_docker_networks_custom', role='tautulli') }}" # Restart Policy-tautulli_docker_restart_policy: unless-stopped+tautulli_role_docker_restart_policy: unless-stopped # State-tautulli_docker_state: started+tautulli_role_docker_state: started
modified
roles/tautulli/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/traefik/defaults/main.yml
@@ -18,36 +18,13 @@ ################################ traefik_trusted_ips: ""-traefik_cloudflare_ips:- - 173.245.48.0/20- - 103.21.244.0/22- - 103.22.200.0/22- - 103.31.4.0/22- - 141.101.64.0/18- - 108.162.192.0/18- - 190.93.240.0/20- - 188.114.96.0/20- - 197.234.240.0/22- - 198.41.128.0/17- - 162.158.0.0/15- - 104.16.0.0/13- - 104.24.0.0/14- - 172.64.0.0/13- - 131.0.72.0/22- - 2400:cb00::/32- - 2606:4700::/32- - 2803:f800::/32- - 2405:b500::/32- - 2405:8100::/32- - 2a06:98c0::/29- - 2c0f:f248::/32 traefik_plugin_cloudflarewarp_enabled: true traefik_file_watch: "true" traefik_x_robots: "none,noarchive,nosnippet,notranslate,noimageindex" # HTTP3 can cause issues with some apps traefik_http3: false traefik_tailscale_enabled: false-# traefik_tailscale_bind_ip: "" # Set to override the WANIP port binding when server is not connected directly to the Internet.+# traefik_tailscale_bind_ip: "" # Set to override the WAN IP port binding when server is not connected directly to the Internet. # traefik_tailscale_bind_ipv6: "" # Same but IPv6 traefik_entrypoint_web_port: "80" traefik_entrypoint_web_readtimeout: "600"@@ -75,7 +52,7 @@ # type: both traefik_dns_resolvers: "1.1.1.1:53,1.0.0.1:53" traefik_disable_propagation_check: false-traefik_enable_http_validation: "{{ traefik_http or traefik.cert.http_validation }}"+traefik_enable_http_validation: "{{ traefik_http or (traefik.cert.http_validation | bool) }}" traefik_enable_zerossl: true # Path is internal to the container, so a host path of /opt/traefik/file.html becomes /etc/traefik/file.html traefik_crowdsec_ban_filepath: "/etc/traefik/ban.html"@@ -105,55 +82,66 @@ - "{{ item.value.port }}:{{ item.value.port }}/tcp" - "{{ item.value.port }}:{{ item.value.port }}/udp" +traefik_trusted_ips_template: "{{ traefik_trusted_ips+ if (traefik_trusted_ips | length > 0)+ else '' }}"++################################+# Lookups+################################++traefik_zerossl_file_ini: "{{ server_appdata_path }}/saltbox/zerossl.ini"+ 
################################ # Booleans ################################ -traefik_authelia_enabled: "{{ 'authelia' in traefik_default_sso_middleware }}"-traefik_authentik_enabled: "{{ 'authentik' in traefik_default_sso_middleware }}"+traefik_role_authelia_enabled: "{{ 'authelia' in traefik_default_sso_middleware }}"+traefik_role_authentik_enabled: "{{ 'authentik' in traefik_default_sso_middleware }}"+traefik_role_metrics_enabled: "{{ traefik.metrics | bool }}" ################################ # Paths ################################ -traefik_paths_folder: "{{ traefik_name }}"-traefik_paths_location: "{{ server_appdata_path }}/{{ traefik_paths_folder }}"-traefik_paths_acme_config_location: "{{ traefik_paths_location }}/acme.json"-traefik_paths_folders_list:- - "{{ traefik_paths_location }}"+traefik_role_paths_folder: "{{ traefik_name }}"+traefik_role_paths_location: "{{ server_appdata_path }}/{{ traefik_role_paths_folder }}"+traefik_role_paths_acme_config_location: "{{ traefik_role_paths_location }}/acme.json"+traefik_role_paths_folders_list:+ - "{{ traefik_role_paths_location }}" ################################ # Web ################################ -traefik_web_subdomain: "{{ traefik.subdomains.dash }}"-traefik_web_domain: "{{ user.domain }}"-traefik_metrics_subdomain: "{{ traefik.subdomains.metrics }}"-traefik_metrics_domain: "{{ user.domain }}"+traefik_role_web_subdomain: "{{ traefik.subdomains.dash }}"+traefik_role_web_domain: "{{ user.domain }}"+traefik_role_metrics_subdomain: "{{ traefik.subdomains.metrics }}"+traefik_role_metrics_domain: "{{ user.domain }}" ################################ # Logging ################################ -traefik_log_level: "ERROR"-traefik_log_file: true-traefik_log_max_size: "10"-traefik_log_max_backups: "3"-traefik_log_max_age: "3"-traefik_log_compress: "true"-traefik_access_log: true-traefik_access_buffer: 100+traefik_role_log_level: "ERROR"+traefik_role_log_file: true+traefik_role_log_max_size: 
"10"+traefik_role_log_max_backups: "3"+traefik_role_log_max_age: "3"+traefik_role_log_compress: "true"+traefik_role_access_log: true+traefik_role_access_buffer: 100 ################################ # DNS ################################ -traefik_dns_record: "{{ traefik_web_subdomain }}"-traefik_dns_zone: "{{ traefik_web_domain }}"-traefik_dns_proxy: "{{ dns.proxied }}"-traefik_metrics_dns_record: "{{ traefik_metrics_subdomain }}"-traefik_metrics_dns_zone: "{{ traefik_metrics_domain }}"-traefik_metrics_dns_proxy: "{{ dns.proxied }}"+traefik_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='traefik') }}"+traefik_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='traefik') }}"+traefik_role_dns_proxy: "{{ dns_proxied }}"+traefik_role_metrics_dns_record: "{{ lookup('role_var', '_metrics_subdomain', role='traefik') }}"+traefik_role_metrics_dns_zone: "{{ lookup('role_var', '_metrics_domain', role='traefik') }}"+traefik_role_metrics_dns_proxy: "{{ dns_proxied }}" ################################ # DNS Provider@@ -224,51 +212,52 @@ ################################ # Container-traefik_docker_container: "{{ traefik_name }}"+traefik_role_docker_container: "{{ traefik_name }}" # Image-traefik_docker_image_pull: true-traefik_docker_image_tag: "v3.6"-traefik_docker_image: "traefik:{{ traefik_docker_image_tag }}"+traefik_role_docker_image_pull: true+traefik_role_docker_image_repo: "traefik"+traefik_role_docker_image_tag: "v3.6"+traefik_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='traefik') }}:{{ lookup('role_var', '_docker_image_tag', role='traefik') }}" # Ports-traefik_docker_ports_defaults:+traefik_role_docker_ports_default: - "{{ traefik_entrypoint_web_port }}:{{ traefik_entrypoint_web_port }}/tcp" - "{{ traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port }}/tcp" - "{{ traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port 
}}/udp"-traefik_docker_ports_tailscale_ipv4_defaults:+traefik_role_docker_ports_tailscale_ipv4_default: - "{{ lookup('vars', 'traefik_tailscale_bind_ip', default=ip_address_public) + ':' + traefik_entrypoint_web_port }}:{{ traefik_entrypoint_web_port }}/tcp" - "{{ lookup('vars', 'traefik_tailscale_bind_ip', default=ip_address_public) + ':' + traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port }}/tcp" - "{{ lookup('vars', 'traefik_tailscale_bind_ip', default=ip_address_public) + ':' + traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port }}/udp" - "{{ tailscale_ipv4 + ':' + traefik_entrypoint_web_port }}:81/tcp" - "{{ tailscale_ipv4 + ':' + traefik_entrypoint_websecure_port }}:444/tcp" - "{{ tailscale_ipv4 + ':' + traefik_entrypoint_websecure_port }}:444/udp"-traefik_docker_ports_tailscale_ipv6_defaults:+traefik_role_docker_ports_tailscale_ipv6_default: - "{{ '[' + lookup('vars', 'traefik_tailscale_bind_ipv6', default=ipv6_address_public) + ']:' + traefik_entrypoint_web_port }}:{{ traefik_entrypoint_web_port }}/tcp" - "{{ '[' + lookup('vars', 'traefik_tailscale_bind_ipv6', default=ipv6_address_public) + ']:' + traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port }}/tcp" - "{{ '[' + lookup('vars', 'traefik_tailscale_bind_ipv6', default=ipv6_address_public) + ']:' + traefik_entrypoint_websecure_port }}:{{ traefik_entrypoint_websecure_port }}/udp" - "{{ '[' + tailscale_ipv6 + ']:' + traefik_entrypoint_web_port }}:81/tcp" - "{{ '[' + tailscale_ipv6 + ']:' + traefik_entrypoint_websecure_port }}:444/tcp" - "{{ '[' + tailscale_ipv6 + ']:' + traefik_entrypoint_websecure_port }}:444/udp"-traefik_docker_ports_custom: []-traefik_docker_ports: "{{ (traefik_docker_ports_defaults- if not traefik_tailscale_enabled- else traefik_docker_ports_tailscale_ipv4_defaults- + (traefik_docker_ports_tailscale_ipv6_defaults- if dns.ipv6- else []))- + traefik_docker_ports_custom }}"+traefik_role_docker_ports_custom: 
[]+traefik_role_docker_ports: "{{ (lookup('role_var', '_docker_ports_default', role='traefik')+ if not traefik_tailscale_enabled+ else lookup('role_var', '_docker_ports_tailscale_ipv4_default', role='traefik')+ + (lookup('role_var', '_docker_ports_tailscale_ipv6_default', role='traefik')+ if dns_ipv6_enabled+ else []))+ + lookup('role_var', '_docker_ports_custom', role='traefik') }}" # Envs-traefik_docker_envs_default:+traefik_role_docker_envs_default: TZ: "{{ tz }}"-traefik_docker_envs_custom: {}-traefik_docker_envs: "{{ traefik_docker_envs_default- | combine(lookup('vars', 'traefik_dns_provider_' + traefik_challenge_provider, default={}))- | combine(traefik_docker_envs_custom) }}"+traefik_role_docker_envs_custom: {}+traefik_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='traefik')+ | combine(lookup('vars', 'traefik_dns_provider_' + traefik_challenge_provider, default={}))+ | combine(lookup('role_var', '_docker_envs_custom', role='traefik')) }}" # Commands-traefik_docker_commands_default:+traefik_role_docker_commands_default: - "--global.sendanonymoususage=false" - "--providers.file.directory=/etc/traefik" - "--providers.file.watch={{ traefik_file_watch }}"@@ -276,15 +265,15 @@ - "--providers.docker.exposedbydefault=false" - "--entrypoints.internal.address=:8080" - "--entrypoints.web.address=:{{ traefik_entrypoint_web_port }}"- - "{{ '--entrypoints.web.forwardedheaders.trustedIPs=' + (traefik_cloudflare_ips | join(',')) + (',' + traefik_trusted_ips if (traefik_trusted_ips | length > 0) else '') }}"- - "{{ '--entrypoints.web.proxyprotocol.trustedIPs=' + (traefik_cloudflare_ips | join(',')) + (',' + traefik_trusted_ips if (traefik_trusted_ips | length > 0) else '') }}"+ - "{{ ('--entrypoints.web.forwardedheaders.trustedIPs=' + traefik_trusted_ips_template) if (traefik_trusted_ips_template | length > 0) else omit }}"+ - "{{ ('--entrypoints.web.proxyprotocol.trustedIPs=' + traefik_trusted_ips_template) if (traefik_trusted_ips_template | length > 
0) else omit }}" - "--entrypoints.web.transport.respondingTimeouts.readTimeout={{ traefik_entrypoint_web_readtimeout }}" - "--entrypoints.web.transport.respondingTimeouts.writeTimeout={{ traefik_entrypoint_web_writetimeout }}" - "--entrypoints.web.transport.respondingTimeouts.idleTimeout={{ traefik_entrypoint_web_idletimeout }}" - "--entrypoints.web.http.maxheaderbytes={{ traefik_entrypoint_web_request_maxheaderbytes }}" - "--entrypoints.websecure.address=:{{ traefik_entrypoint_websecure_port }}"- - "{{ '--entrypoints.websecure.forwardedheaders.trustedIPs=' + (traefik_cloudflare_ips | join(',')) + (',' + traefik_trusted_ips if (traefik_trusted_ips | length > 0) else '') }}"- - "{{ '--entrypoints.websecure.proxyprotocol.trustedIPs=' + (traefik_cloudflare_ips | join(',')) + (',' + traefik_trusted_ips if (traefik_trusted_ips | length > 0) else '') }}"+ - "{{ ('--entrypoints.websecure.forwardedheaders.trustedIPs=' + traefik_trusted_ips_template) if (traefik_trusted_ips_template | length > 0) else omit }}"+ - "{{ ('--entrypoints.websecure.proxyprotocol.trustedIPs=' + traefik_trusted_ips_template) if (traefik_trusted_ips_template | length > 0) else omit }}" - "--entrypoints.websecure.transport.respondingTimeouts.readTimeout={{ traefik_entrypoint_websecure_readtimeout }}" - "--entrypoints.websecure.transport.respondingTimeouts.writeTimeout={{ traefik_entrypoint_websecure_writetimeout }}" - "--entrypoints.websecure.transport.respondingTimeouts.idleTimeout={{ traefik_entrypoint_websecure_idletimeout }}"@@ -292,26 +281,26 @@ - "--entrypoints.websecure.http.tls.certResolver={{ traefik_default_certresolver }}" - "--api.dashboard=true" - "--api=true"- - "--log.level={{ traefik_log_level }}"- - "{{ '--log.filepath=/etc/traefik/traefik.log' if traefik_log_file else omit }}"- - "{{ '--log.maxsize=' + traefik_log_max_size if traefik_log_file else omit }}"- - "{{ '--log.maxbackups=' + traefik_log_max_backups if traefik_log_file else omit }}"- - "{{ '--log.maxage=' + 
traefik_log_max_age if traefik_log_file else omit }}"- - "{{ '--log.compress=' + traefik_log_compress if traefik_log_file else omit }}"- - "{{ '--log.nocolor=true' if traefik_log_file else omit }}"- - "--accesslog={{ traefik_access_log }}"+ - "--log.level={{ lookup('role_var', '_log_level', role='traefik') }}"+ - "{{ ('--log.filepath=/etc/traefik/traefik.log') if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "{{ ('--log.maxsize=' + lookup('role_var', '_log_max_size', role='traefik')) if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "{{ ('--log.maxbackups=' + lookup('role_var', '_log_max_backups', role='traefik')) if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "{{ ('--log.maxage=' + lookup('role_var', '_log_max_age', role='traefik')) if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "{{ ('--log.compress=' + lookup('role_var', '_log_compress', role='traefik')) if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "{{ '--log.nocolor=true' if lookup('role_var', '_log_file', role='traefik') else omit }}"+ - "--accesslog={{ lookup('role_var', '_access_log', role='traefik') }}" - "--accesslog.fields.names.StartUTC=drop" - "--accesslog.fields.headers.names.User-Agent=keep" - "--accesslog.fields.headers.names.Content-Type=keep" - "--accesslog.filepath=/etc/traefik/access.log"- - "--accesslog.bufferingsize={{ traefik_access_buffer }}"+ - "--accesslog.bufferingsize={{ lookup('role_var', '_access_buffer', role='traefik') }}" - "--certificatesresolvers.cfdns.acme.dnschallenge.provider={{ traefik_challenge_provider }}"- - "{{ '--certificatesresolvers.cfdns.acme.dnschallenge.resolvers=' + traefik_dns_resolvers if (traefik_dns_resolvers | length > 0) else omit }}"+ - "{{ ('--certificatesresolvers.cfdns.acme.dnschallenge.resolvers=' + traefik_dns_resolvers) if (traefik_dns_resolvers | length > 0) else omit }}" - "--certificatesresolvers.cfdns.acme.email={{ user.email }}" - 
"--certificatesresolvers.cfdns.acme.storage=/etc/traefik/acme.json" - "{{ '--certificatesresolvers.cfdns.acme.dnschallenge.propagation.delayBeforeChecks=60s' if traefik_disable_propagation_check else omit }}" - "{{ '--certificatesresolvers.cfdns.acme.dnschallenge.propagation.disableChecks=true' if traefik_disable_propagation_check else omit }}"-traefik_docker_commands_zerossl_acme:+traefik_role_docker_commands_zerossl_acme: - "--certificatesresolvers.zerossl.acme.dnschallenge.provider={{ traefik_challenge_provider }}" - "{{ '--certificatesresolvers.zerossl.acme.dnschallenge.resolvers=' + traefik_dns_resolvers if (traefik_dns_resolvers | length > 0) else omit }}" - "--certificatesresolvers.zerossl.acme.email={{ user.email }}"@@ -321,20 +310,20 @@ - "--certificatesresolvers.zerossl.acme.storage=/etc/traefik/acme.json" - "{{ '--certificatesresolvers.zerossl.acme.dnschallenge.propagation.delayBeforeChecks=60s' if traefik_disable_propagation_check else omit }}" - "{{ '--certificatesresolvers.zerossl.acme.dnschallenge.propagation.disableChecks=true' if traefik_disable_propagation_check else omit }}"-traefik_docker_commands_http_validation_acme:+traefik_role_docker_commands_http_validation_acme: - "--certificatesresolvers.httpresolver.acme.httpchallenge.entrypoint=web" - "--certificatesresolvers.httpresolver.acme.email={{ user.email }}" - "--certificatesresolvers.httpresolver.acme.storage=/etc/traefik/acme.json"-traefik_docker_commands_http_validation_acme_zerossl:+traefik_role_docker_commands_http_validation_acme_zerossl: - "--certificatesresolvers.zerosslhttp.acme.httpchallenge.entrypoint=web" - "--certificatesresolvers.zerosslhttp.acme.email={{ user.email }}" - "--certificatesresolvers.zerosslhttp.acme.caserver=https://acme.zerossl.com/v2/DV90" - "--certificatesresolvers.zerosslhttp.acme.eab.kid={{ traefik_zerossl_kid | default('') }}" - "--certificatesresolvers.zerosslhttp.acme.eab.hmacencoded={{ traefik_zerossl_hmacencoded | default('') }}" - 
"--certificatesresolvers.zerosslhttp.acme.storage=/etc/traefik/acme.json"-traefik_docker_commands_google_acme:+traefik_role_docker_commands_google_acme: - "--certificatesresolvers.google.acme.dnschallenge.provider={{ traefik_challenge_provider }}"- - "{{ '--certificatesresolvers.google.acme.dnschallenge.resolvers=' + traefik_dns_resolvers if (traefik_dns_resolvers | length > 0) else omit }}"+ - "{{ ('--certificatesresolvers.google.acme.dnschallenge.resolvers=' + traefik_dns_resolvers) if (traefik_dns_resolvers | length > 0) else omit }}" - "--certificatesresolvers.google.acme.email={{ user.email }}" - "--certificatesresolvers.google.acme.caserver=https://dv.acme-v02.api.pki.goog/directory" - "--certificatesresolvers.google.acme.eab.kid={{ traefik_google_kid | default('') }}"@@ -342,105 +331,98 @@ - "--certificatesresolvers.google.acme.storage=/etc/traefik/acme.json" - "{{ '--certificatesresolvers.google.acme.dnschallenge.propagation.delayBeforeChecks=60s' if traefik_disable_propagation_check else omit }}" - "{{ '--certificatesresolvers.google.acme.dnschallenge.propagation.disableChecks=true' if traefik_disable_propagation_check else omit }}"-traefik_docker_commands_google_acme_http:+traefik_role_docker_commands_google_acme_http: - "--certificatesresolvers.googlehttp.acme.httpchallenge.entrypoint=web" - "--certificatesresolvers.googlehttp.acme.email={{ user.email }}" - "--certificatesresolvers.googlehttp.acme.caserver=https://dv.acme-v02.api.pki.goog/directory" - "--certificatesresolvers.googlehttp.acme.eab.kid={{ traefik_google_kid | default('') }}" - "--certificatesresolvers.googlehttp.acme.eab.hmacencoded={{ traefik_google_hmacencoded | default('') }}" - "--certificatesresolvers.googlehttp.acme.storage=/etc/traefik/acme.json"-traefik_docker_commands_metrics:+traefik_role_docker_commands_metrics: - "--metrics.prometheus=true" - "--metrics.prometheus.addentrypointslabels=true" - "--metrics.prometheus.addrouterslabels=true" - 
"--metrics.prometheus.addserviceslabels=true" - "--metrics.prometheus.manualrouting=true"-traefik_docker_commands_cloudflarewarp_plugin:- - "--experimental.plugins.cloudflarewarp.modulename=github.com/BetterCorp/cloudflarewarp"- - "--experimental.plugins.cloudflarewarp.version=v1.3.3"-traefik_docker_commands_themepark_plugin:+traefik_role_docker_commands_cloudflarewarp_plugin:+ - "--experimental.plugins.cloudflarewarp.modulename=github.com/saltyorg/cloudflarewarp"+ - "--experimental.plugins.cloudflarewarp.version=v1.0.0"+traefik_role_docker_commands_themepark_plugin: - "--experimental.plugins.themepark.modulename=github.com/packruler/traefik-themepark" - "--experimental.plugins.themepark.version=v1.4.2"-traefik_docker_commands_http3:+traefik_role_docker_commands_http3: - "--entrypoints.websecure.http3.advertisedport={{ traefik_entrypoint_websecure_port }}"-traefik_docker_commands_tailscale:+traefik_role_docker_commands_tailscale: - "--entrypoints.tailscale-web.address=:81" - "--entrypoints.tailscale-websecure.address=:444"-traefik_docker_commands_crowdsec:+traefik_role_docker_commands_crowdsec: - "--experimental.plugins.bouncer.modulename=github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin" - "--experimental.plugins.bouncer.version=v1.4.4"-traefik_docker_commands_custom: []-traefik_docker_commands: "{{ traefik_docker_commands_default- + traefik_docker_commands_custom- + (traefik_docker_commands_metrics- if traefik.metrics- else [])- + (traefik_docker_commands_cloudflarewarp_plugin- if traefik_plugin_cloudflarewarp_enabled and cloudflare_is_enabled- else [])- + (traefik_docker_commands_themepark_plugin- if global_themepark_plugin_enabled- else [])- + (traefik_docker_commands_zerossl_acme- if (traefik_enable_zerossl)- else [])- + (traefik_docker_commands_http_validation_acme_zerossl- if (traefik_enable_http_validation and traefik_enable_zerossl)- else [])- + (traefik_docker_commands_http_validation_acme- if (traefik_enable_http_validation)- else [])- + 
(traefik_docker_commands_google_acme- if (traefik_google_kid is defined) and (traefik_google_hmacencoded is defined)- else [])- + (traefik_docker_commands_google_acme_http- if (traefik_google_kid is defined) and (traefik_google_hmacencoded is defined) and (traefik_enable_http_validation)- else [])- + (traefik_docker_commands_http3- if traefik_http3- else [])- + (traefik_docker_commands_tailscale- if traefik_tailscale_enabled- else [])- + (traefik_docker_commands_crowdsec- if crowdsec_is_enabled- else [])- + custom_entrypoints }}"+traefik_role_docker_commands_custom: []+traefik_role_docker_commands: "{{ lookup('role_var', '_docker_commands_default', role='traefik')+ + lookup('role_var', '_docker_commands_custom', role='traefik')+ + (lookup('role_var', '_docker_commands_metrics', role='traefik')+ if (lookup('role_var', '_metrics_enabled', role='traefik'))+ else [])+ + (lookup('role_var', '_docker_commands_cloudflarewarp_plugin', role='traefik')+ if traefik_plugin_cloudflarewarp_enabled and cloudflare_is_enabled+ else [])+ + (lookup('role_var', '_docker_commands_themepark_plugin', role='traefik')+ if global_themepark_plugin_enabled+ else [])+ + (lookup('role_var', '_docker_commands_zerossl_acme', role='traefik')+ if (traefik_enable_zerossl)+ else [])+ + (lookup('role_var', '_docker_commands_http_validation_acme_zerossl', role='traefik')+ if (traefik_enable_http_validation and traefik_enable_zerossl)+ else [])+ + (lookup('role_var', '_docker_commands_http_validation_acme', role='traefik')+ if (traefik_enable_http_validation)+ else [])+ + (lookup('role_var', '_docker_commands_google_acme', role='traefik')+ if (traefik_google_kid is defined) and (traefik_google_hmacencoded is defined)+ else [])+ + (lookup('role_var', '_docker_commands_google_acme_http', role='traefik')+ if (traefik_google_kid is defined) and (traefik_google_hmacencoded is defined) and (traefik_enable_http_validation)+ else [])+ + (lookup('role_var', '_docker_commands_http3', role='traefik')+ if 
traefik_http3+ else [])+ + (lookup('role_var', '_docker_commands_tailscale', role='traefik')+ if traefik_tailscale_enabled+ else [])+ + (lookup('role_var', '_docker_commands_crowdsec', role='traefik')+ if crowdsec_is_enabled+ else [])+ + custom_entrypoints }}" # Volumes-traefik_docker_volumes_default:- - "/var/run/docker.sock:/var/run/docker.sock:ro"- - "{{ traefik_paths_location }}:/etc/traefik"-traefik_docker_volumes_custom: []-traefik_docker_volumes: "{{ traefik_docker_volumes_default- + traefik_docker_volumes_custom }}"--# Devices-traefik_docker_devices_default: []-traefik_docker_devices_custom: []-traefik_docker_devices: "{{ traefik_docker_devices_default- + traefik_docker_devices_custom }}"+traefik_role_docker_volumes_default:+ - "/var/run/docker.sock:/var/run/docker.sock"+ - "{{ traefik_role_paths_location }}:/etc/traefik"+traefik_role_docker_volumes_custom: []+traefik_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='traefik')+ + lookup('role_var', '_docker_volumes_custom', role='traefik') }}" # Hosts-traefik_docker_hosts_default:+traefik_role_docker_hosts_default: host.docker.internal: "172.19.0.1"-traefik_docker_hosts_custom: {}-traefik_docker_hosts: "{{ docker_hosts_common- | combine(traefik_docker_hosts_default)- | combine(traefik_docker_hosts_custom) }}"+traefik_role_docker_hosts_custom: {}+traefik_role_docker_hosts: "{{ lookup('role_var', '_docker_hosts_default', role='traefik')+ | combine(lookup('role_var', '_docker_hosts_custom', role='traefik')) }}" # Labels-traefik_docker_labels_default:- com.github.saltbox.saltbox_managed: "true"+traefik_role_docker_labels_use_common: false+traefik_role_docker_labels_default: traefik.enable: "true" traefik.http.routers.traefik-internal.rule: "Host(`{{ traefik_name }}`)" traefik.http.routers.traefik-internal.entrypoints: "internal" traefik.http.routers.traefik-internal.service: "api@internal"- traefik.http.routers.traefik-http.rule: "Host(`{{ traefik_web_subdomain }}.{{ 
traefik_web_domain }}`)"+ traefik.http.routers.traefik-http.rule: "Host(`{{ lookup('role_var', '_web_subdomain', role='traefik') }}.{{ lookup('role_var', '_web_domain', role='traefik') }}`)" traefik.http.routers.traefik-http.entrypoints: "{{ traefik_entrypoint_web }}" traefik.http.routers.traefik-http.middlewares: "{{ traefik_default_middleware_http }}" traefik.http.routers.traefik-http.priority: "20"- traefik.http.routers.traefik.rule: "Host(`{{ traefik_web_subdomain }}.{{ traefik_web_domain }}`)"+ traefik.http.routers.traefik.rule: "Host(`{{ lookup('role_var', '_web_subdomain', role='traefik') }}.{{ lookup('role_var', '_web_domain', role='traefik') }}`)" traefik.http.routers.traefik.entrypoints: "{{ traefik_entrypoint_websecure }}" traefik.http.routers.traefik.tls: "true" traefik.http.routers.traefik.tls.options: "securetls@file"@@ -452,40 +434,40 @@ traefik.http.middlewares.autodetect.contenttype: "true" traefik.http.middlewares.redirect-to-https.redirectscheme.scheme: "https" traefik.http.middlewares.redirect-to-https.redirectscheme.permanent: "true"- traefik.http.middlewares.authelia.forwardauth.address: "{{ 'http://authelia:9091/api/verify?rd=' + authelia_web_url + '/'+ traefik.http.middlewares.authelia.forwardauth.address: "{{ 'http://' + authelia_name + ':9091/api/verify?rd=' + lookup('role_var', '_web_url', role='authelia') + '/' if authelia_is_master- else authelia_web_url + '/api/verify?rd=' + authelia_web_url + '/' }}"+ else lookup('role_var', '_web_url', role='authelia') + '/api/verify?rd=' + lookup('role_var', '_web_url', role='authelia') + '/' }}" traefik.http.middlewares.authelia.forwardauth.trustForwardHeader: "true"- traefik.http.middlewares.authelia.forwardauth.authResponseHeaders: "Remote-User, Remote-Groups, Remote-Name, Remote-Email"- traefik.http.middlewares.authelia-basic.forwardauth.address: "{{ 'http://authelia:9091/api/verify?auth=basic&rd=' + authelia_web_url + '/'+ traefik.http.middlewares.authelia.forwardauth.authResponseHeaders: "{{ 
lookup('role_var', '_response_headers', role='authelia') | join(',') }}"+ traefik.http.middlewares.authelia-basic.forwardauth.address: "{{ 'http://' + authelia_name + ':9091/api/verify?auth=basic&rd=' + lookup('role_var', '_web_url', role='authelia') + '/' if authelia_is_master- else authelia_web_url + '/api/verify?auth=basic&rd=' + authelia_web_url + '/' }}"+ else lookup('role_var', '_web_url', role='authelia') + '/api/verify?auth=basic&rd=' + lookup('role_var', '_web_url', role='authelia') + '/' }}" traefik.http.middlewares.authelia-basic.forwardauth.trustForwardHeader: "true"- traefik.http.middlewares.authelia-basic.forwardauth.authResponseHeaders: "Remote-User, Remote-Groups, Remote-Name, Remote-Email"+ traefik.http.middlewares.authelia-basic.forwardauth.authResponseHeaders: "{{ lookup('role_var', '_response_headers', role='authelia') | join(',') }}" traefik.http.middlewares.authentik.forwardauth.address: "{{ 'http://' + authentik_name + ':9000/outpost.goauthentik.io/auth/traefik' if authentik_is_master- else authentik_web_url + '/outpost.goauthentik.io/auth/traefik' }}"+ else lookup('role_var', '_web_url', role='authentik') + '/outpost.goauthentik.io/auth/traefik' }}" traefik.http.middlewares.authentik.forwardauth.trustForwardHeader: "true"- traefik.http.middlewares.authentik.forwardauth.authResponseHeaders: "X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid,X-authentik-jwt,X-authentik-meta-jwks,X-authentik-meta-outpost,X-authentik-meta-provider,X-authentik-meta-app,X-authentik-meta-version"--traefik_docker_labels_cloudflare:+ traefik.http.middlewares.authentik.forwardauth.authResponseHeaders: "{{ lookup('role_var', '_response_headers', role='authentik') | join(',') }}"++traefik_role_docker_labels_cloudflare: traefik.http.middlewares.cloudflarewarp.plugin.cloudflarewarp.disableDefault: "false" -traefik_docker_labels_dns_validation:+traefik_role_docker_labels_dns_validation: traefik.http.routers.traefik.tls.certresolver: 
"{{ traefik_default_certresolver }}" traefik.http.routers.traefik.tls.domains[0].main: "{{ user.domain }}" traefik.http.routers.traefik.tls.domains[0].sans: "{{ '*.' + user.domain }}" -traefik_docker_labels_http_validation:+traefik_role_docker_labels_http_validation: traefik.http.routers.traefik.tls.certresolver: "{{ traefik_default_certresolver }}" -traefik_docker_labels_metrics:- traefik.http.routers.metrics-http.rule: "Host(`{{ traefik_metrics_subdomain }}.{{ traefik_metrics_domain }}`) && Path(`/prometheus`)"+traefik_role_docker_labels_metrics:+ traefik.http.routers.metrics-http.rule: "Host(`{{ lookup('role_var', '_metrics_subdomain', role='traefik') }}.{{ lookup('role_var', '_metrics_domain', role='traefik') }}`) && Path(`/prometheus`)" traefik.http.routers.metrics-http.service: prometheus@internal traefik.http.routers.metrics-http.entrypoints: "{{ traefik_entrypoint_web }}" traefik.http.routers.metrics-http.middlewares: "traefik-auth,{{ traefik_default_middleware_http_api }}" traefik.http.routers.metrics-http.priority: "20"- traefik.http.routers.metrics.rule: "Host(`{{ traefik_metrics_subdomain }}.{{ traefik_metrics_domain }}`) && Path(`/prometheus`)"+ traefik.http.routers.metrics.rule: "Host(`{{ lookup('role_var', '_metrics_subdomain', role='traefik') }}.{{ lookup('role_var', '_metrics_domain', role='traefik') }}`) && Path(`/prometheus`)" traefik.http.routers.metrics.service: prometheus@internal traefik.http.routers.metrics.entrypoints: "{{ traefik_entrypoint_websecure }}" traefik.http.routers.metrics.tls: "true"@@ -493,56 +475,44 @@ traefik.http.routers.metrics.middlewares: "traefik-auth,{{ traefik_default_middleware_api }}" traefik.http.routers.metrics.priority: "20" -traefik_docker_labels_crowdsec:+traefik_role_docker_labels_crowdsec: traefik.http.middlewares.crowdsec.plugin.bouncer.enabled: "true" traefik.http.middlewares.crowdsec.plugin.bouncer.crowdseclapikey: "{{ traefik_crowdsec_bouncer_key | default('') }}" 
traefik.http.middlewares.crowdsec.plugin.bouncer.crowdseclapischeme: "http" traefik.http.middlewares.crowdsec.plugin.bouncer.crowdseclapihost: "172.19.0.1:{{ traefik_crowdsec_port }}"- traefik.http.middlewares.crowdsec.plugin.bouncer.forwardedheaderstrustedips: "{{ (traefik_cloudflare_ips | join(',')) + (',' + traefik_trusted_ips if (traefik_trusted_ips | length > 0) else '') }}"+ traefik.http.middlewares.crowdsec.plugin.bouncer.forwardedheaderstrustedips: "{{ traefik_trusted_ips_template if (traefik_trusted_ips_template | length > 0) else omit }}" traefik.http.middlewares.crowdsec.plugin.bouncer.banhtmlfilepath: "{{ traefik_crowdsec_ban_filepath }}" -traefik_docker_labels_custom: {}--traefik_docker_labels: "{{ traefik_docker_labels_default- | combine(traefik_docker_labels_custom)- | combine((traefik_docker_labels_http_validation- if traefik_http- else traefik_docker_labels_dns_validation))- | combine((traefik_docker_labels_metrics- if traefik.metrics- else {}))- | combine((traefik_docker_labels_cloudflare- if traefik_plugin_cloudflarewarp_enabled and cloudflare_is_enabled- else {}))- | combine((traefik_docker_labels_crowdsec- if crowdsec_is_enabled- else {})) }}"+traefik_role_docker_labels_custom: {}+traefik_role_docker_labels: "{{ docker_labels_saltbox+ | combine(lookup('role_var', '_docker_labels_default', role='traefik'))+ | combine(lookup('role_var', '_docker_labels_custom', role='traefik'))+ | combine((lookup('role_var', '_docker_labels_http_validation', role='traefik')+ if traefik_http+ else lookup('role_var', '_docker_labels_dns_validation', role='traefik')))+ | combine((lookup('role_var', '_docker_labels_metrics', role='traefik')+ if lookup('role_var', '_metrics_enabled', role='traefik')+ else {}))+ | combine((lookup('role_var', '_docker_labels_cloudflare', role='traefik')+ if traefik_plugin_cloudflarewarp_enabled and cloudflare_is_enabled+ else {}))+ | combine((lookup('role_var', '_docker_labels_crowdsec', role='traefik')+ if crowdsec_is_enabled+ else 
{})) }}" # Hostname-traefik_docker_hostname: "{{ traefik_name }}"+traefik_role_docker_hostname: "{{ traefik_name }}" # Networks-traefik_docker_networks_alias: "{{ traefik_name }}"-traefik_docker_networks_default: []-traefik_docker_networks_custom: []-traefik_docker_networks: "{{ docker_networks_common- + traefik_docker_networks_default- + traefik_docker_networks_custom }}"--# Capabilities-traefik_docker_capabilities_default: []-traefik_docker_capabilities_custom: []-traefik_docker_capabilities: "{{ traefik_docker_capabilities_default- + traefik_docker_capabilities_custom }}"--# Security Opts-traefik_docker_security_opts_default: []-traefik_docker_security_opts_custom: []-traefik_docker_security_opts: "{{ traefik_docker_security_opts_default- + traefik_docker_security_opts_custom }}"+traefik_role_docker_networks_alias: "{{ traefik_name }}"+traefik_role_docker_networks_default: []+traefik_role_docker_networks_custom: []+traefik_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='traefik')+ + lookup('role_var', '_docker_networks_custom', role='traefik') }}" # Restart Policy-traefik_docker_restart_policy: unless-stopped+traefik_role_docker_restart_policy: unless-stopped # State-traefik_docker_state: started+traefik_role_docker_state: started
modified
roles/traefik/tasks/main.yml
@@ -10,28 +10,28 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_metrics_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_metrics_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_metrics_dns_proxy') }}"- when: traefik.metrics+ dns_record: "{{ lookup('role_var', '_metrics_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_metrics_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_metrics_dns_proxy') }}"+ when: lookup('role_var', '_metrics_enabled', role='traefik') - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" -- name: Remove existing Docker container+- name: Remove existing Error Pages Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" vars: var_prefix: "error_pages" when: not traefik_error_pages_enabled -- name: Remove existing Docker container+- name: Remove existing Authelia Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml" vars: var_prefix: "authelia"@@ -41,20 +41,20 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Traefik ZeroSSL Tasks- ansible.builtin.import_tasks: "subtasks/zerossl.yml"+ ansible.builtin.include_tasks: "subtasks/zerossl.yml" when: traefik_enable_zerossl and (not continuous_integration) - name: Traefik Config 
Tasks- ansible.builtin.import_tasks: "subtasks/config.yml"+ ansible.builtin.include_tasks: "subtasks/config.yml" when: (not continuous_integration) - name: Traefik Crowdsec Tasks- ansible.builtin.import_tasks: "subtasks/crowdsec.yml"+ ansible.builtin.include_tasks: "subtasks/crowdsec.yml" when: crowdsec_is_enabled - name: Remove Certificates ansible.builtin.file:- path: "{{ traefik_paths_location }}/acme.json"+ path: "{{ lookup('role_var', '_paths_location', role='traefik') }}/acme.json" state: absent when: ('traefik-reset-certs' in ansible_run_tags) @@ -63,13 +63,13 @@ - name: Wait for 'acme.json' to be created ansible.builtin.wait_for:- path: "{{ traefik_paths_acme_config_location }}"+ path: "{{ lookup('role_var', '_paths_acme_config_location', role='traefik') }}" state: present when: (not continuous_integration) -- name: Chown '{{ traefik_paths_location }}'+- name: Chown '{{ lookup('role_var', '_paths_location', role='traefik') }}' ansible.builtin.file:- path: "{{ traefik_paths_location }}"+ path: "{{ lookup('role_var', '_paths_location', role='traefik') }}" state: directory recurse: true owner: "{{ user.name }}"@@ -77,9 +77,9 @@ mode: "0775" when: (not continuous_integration) -- name: Chown '{{ traefik_paths_acme_config_location }}'+- name: Chown '{{ lookup('role_var', '_paths_acme_config_location', role='traefik') }}' ansible.builtin.file:- path: "{{ traefik_paths_acme_config_location }}"+ path: "{{ lookup('role_var', '_paths_acme_config_location', role='traefik') }}" state: file owner: "{{ user.name }}" group: "{{ user.name }}"@@ -89,12 +89,12 @@ - name: "Import Authelia Role" ansible.builtin.include_role: name: authelia- when: traefik_authelia_enabled and authelia_is_master+ when: lookup('role_var', '_authelia_enabled', role='traefik') and authelia_is_master - name: "Import Authentik Role" ansible.builtin.include_role: name: authentik- when: traefik_authentik_enabled and authentik_is_master+ when: lookup('role_var', '_authentik_enabled', role='traefik') and 
authentik_is_master - name: "Import Error Pages Role" ansible.builtin.include_role:
modified
roles/traefik/tasks/subtasks/config.yml
@@ -10,7 +10,7 @@ - name: "Import 'dynamic.yml'" ansible.builtin.template: src: dynamic.yml.j2- dest: "{{ traefik_paths_location }}/dynamic.yml"+ dest: "{{ traefik_role_paths_location }}/dynamic.yml" force: true owner: "{{ user.name }}" group: "{{ user.name }}"@@ -18,17 +18,18 @@ - name: Remove 'themepark.yml' ansible.builtin.file:- path: "{{ traefik_paths_location }}/themepark.yml"+ path: "{{ traefik_role_paths_location }}/themepark.yml" state: absent - name: Create auth file community.general.htpasswd:- path: /opt/traefik/auth+ path: "{{ server_appdata_path }}/traefik/auth" name: "{{ user.name }}" password: "{{ user.pass }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"+ no_log: true - name: Initialize 'custom_entrypoints' variable ansible.builtin.set_fact:@@ -53,9 +54,9 @@ loop: "{{ traefik_entrypoint_custom | dict2items }}" when: (traefik_entrypoint_custom | length > 0) -- name: Set 'traefik_docker_ports_defaults' variable+- name: Set 'traefik_role_docker_ports_default' variable ansible.builtin.set_fact:- traefik_docker_ports_defaults: "{{ (traefik_docker_ports_defaults + custom_entrypoints_ports) | unique }}"+ traefik_role_docker_ports_default: "{{ (traefik_role_docker_ports_default + custom_entrypoints_ports) | unique }}" when: (traefik_entrypoint_custom | length > 0) - name: Tailscale block
modified
roles/traefik/tasks/subtasks/crowdsec.yml
@@ -38,7 +38,7 @@ ansible.builtin.copy: src: "ban.html" force: true- dest: "{{ traefik_paths_location }}/ban.html"+ dest: "{{ traefik_role_paths_location }}/ban.html" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/traefik/tasks/subtasks/zerossl.yml
@@ -13,7 +13,7 @@ - name: "ZeroSSL | Check if PIN exists" ansible.builtin.stat:- path: "/opt/saltbox/zerossl.ini"+ path: "{{ traefik_zerossl_file_ini }}" register: zerossl_ini - name: "ZeroSSL | Existing Credentials Block"@@ -21,8 +21,8 @@ block: - name: "ZeroSSL | Set ZeroSSL variables" ansible.builtin.set_fact:- traefik_zerossl_kid: "{{ lookup('ini', 'kid section=zerossl file=/opt/saltbox/zerossl.ini') }}"- traefik_zerossl_hmacencoded: "{{ lookup('ini', 'hmacencoded section=zerossl file=/opt/saltbox/zerossl.ini') }}"+ traefik_zerossl_kid: "{{ lookup('ini', 'kid section=zerossl file=' + traefik_zerossl_file_ini) }}"+ traefik_zerossl_hmacencoded: "{{ lookup('ini', 'hmacencoded section=zerossl file=' + traefik_zerossl_file_ini) }}" - name: "ZeroSSL | New Credentials Block" when: not zerossl_ini.stat.exists@@ -56,16 +56,16 @@ - name: "ZeroSSL | Create directories" ansible.builtin.file:- path: "/opt/saltbox"+ path: "{{ server_appdata_path }}/saltbox" state: directory owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0775" when: (zerossl.status == 200) and zerossl.json.success - - name: "ZeroSSL | Add kid to '/opt/saltbox/zerossl.ini'"+ - name: "ZeroSSL | Add kid to '{{ traefik_zerossl_file_ini }}'" community.general.ini_file:- path: /opt/saltbox/zerossl.ini+ path: "{{ traefik_zerossl_file_ini }}" section: "zerossl" option: kid value: "{{ traefik_zerossl_kid }}"@@ -74,9 +74,9 @@ mode: "0664" when: (zerossl.status == 200) and zerossl.json.success - - name: "ZeroSSL | Add hmacencoded to '/opt/saltbox/zerossl.ini'"+ - name: "ZeroSSL | Add hmacencoded to '{{ traefik_zerossl_file_ini }}'" community.general.ini_file:- path: /opt/saltbox/zerossl.ini+ path: "{{ traefik_zerossl_file_ini }}" section: "zerossl" option: hmacencoded value: "{{ traefik_zerossl_hmacencoded }}"
modified
roles/traefik_template/defaults/main.yml
@@ -19,12 +19,13 @@ traefik_template_name: "{{ service_name.user_input | lower }}" -traefik_template_web_subdomain: "{{ service_name.user_input | lower }}"-traefik_template_web_domain: "{{ user.domain }}"-traefik_template_web_port: "{{ service_port.user_input }}"+traefik_template_role_web_subdomain: "{{ service_name.user_input | lower }}"+traefik_template_role_web_domain: "{{ user.domain }}"+traefik_template_role_web_port: "{{ service_port.user_input }}" -traefik_template_traefik_enabled: "true"-traefik_template_traefik_sso_middleware: "{{ traefik_default_sso_middleware if (service_sso_enabled.user_input | bool) else '' }}"-traefik_template_traefik_middleware_default: "{{ traefik_default_middleware }}"-traefik_template_traefik_api_enabled: "{{ (service_api_enabled.user_input | default(false) | bool) | default(false) }}"-traefik_template_traefik_api_endpoint: "PathPrefix(`/api`)"+traefik_template_role_traefik_enabled: "true"+traefik_template_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware if (service_sso_enabled.user_input | bool) else '' }}"+traefik_template_role_traefik_middleware_default: "{{ traefik_default_middleware }}"+traefik_template_role_traefik_api_enabled: "{{ (service_api_enabled.user_input | default(false) | bool) | default(false) }}"+traefik_template_role_traefik_api_endpoint: "PathPrefix(`/api`)"+traefik_template_role_docker_network_mode: "{{ service_gluetun_container.user_input if ((service_gluetun_enabled.user_input | lower) | bool) else docker_networks_name_common }}"
modified
roles/traefik_template/tasks/main.yml
@@ -25,7 +25,7 @@ - name: Validate boolean input ansible.builtin.fail: msg: "You must enter a boolean value (yes/no)"- when: service_sso_enabled.user_input | lower not in ['true', 'false', 'yes', 'no']+ when: (service_sso_enabled.user_input | lower) not in ['true', 'false', 'yes', 'no'] - name: API Router when: (service_sso_enabled.user_input | bool)@@ -38,45 +38,32 @@ - name: Validate boolean input ansible.builtin.fail: msg: "You must enter a boolean value (yes/no)"- when: service_api_enabled.user_input | lower not in ['true', 'false', 'yes', 'no']+ when: (service_api_enabled.user_input | lower) not in ['true', 'false', 'yes', 'no'] -- name: Remove keys with omit values or empty values from docker_labels_common- ansible.builtin.set_fact:- docker_labels_common: "{{ docker_labels_common | dict2items |- rejectattr('value', 'equalto', '') |- rejectattr('value', 'search', '__omit_place_holder__') |- items2dict }}"+- name: Prompt for user input+ ansible.builtin.pause:+ prompt: "Do you want the container to use gluetun? 
(yes/no)"+ register: service_gluetun_enabled++- name: Validate boolean input+ ansible.builtin.fail:+ msg: "You must enter a boolean value (yes/no)"+ when: (service_gluetun_enabled.user_input | lower) not in ['true', 'false', 'yes', 'no']++- name: Prompt for user input+ ansible.builtin.pause:+ prompt: "Please enter the name of the gluetun container you want to use"+ register: service_gluetun_container+ when: (service_gluetun_enabled.user_input | bool) - name: Generate Docker Compose template- ansible.builtin.copy:+ ansible.builtin.template:+ src: docker-compose.yml.j2 dest: "{{ traefik_template_file }}"- content: |- services:- {{ traefik_template_name }}:- container_name: {{ traefik_template_name }}- environment: # Change this as needed for your image- PUID: "{{ uid }}"- PGID: "{{ gid }}"- TZ: "{{ tz }}"- hostname: {{ traefik_template_name }}- image: your_image:your_tag- labels:- {% for key, value in docker_labels_common.items() %}- {{ key }}: {{ value }}- {% endfor %}- networks:- - saltbox- restart: unless-stopped- volumes: # Change this as needed for your image- - /opt/{{ traefik_template_name }}:/config- - /etc/localtime:/etc/localtime:ro-- networks:- saltbox:- external: true owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0644"+ force: true - name: Print output information ansible.builtin.debug:
modified
roles/transfer/defaults/main.yml
@@ -17,135 +17,86 @@ # Paths ################################ -transfer_uploads_location: "/tmp"+transfer_role_uploads_location: "/tmp" ################################ # Web ################################ -transfer_web_subdomain: "{{ transfer_name }}"-transfer_web_domain: "{{ user.domain }}"-transfer_web_port: "8080"-transfer_web_user: "{{ user.name }}"-transfer_web_pass: "{{ user.pass }}"-transfer_web_url: "{{ 'https://' + (transfer_web_subdomain + '.' + transfer_web_domain- if (transfer_web_subdomain | length > 0)- else transfer_web_domain) }}"+transfer_role_web_subdomain: "{{ transfer_name }}"+transfer_role_web_domain: "{{ user.domain }}"+transfer_role_web_port: "8080"+transfer_role_web_user: "{{ user.name }}"+transfer_role_web_pass: "{{ user.pass }}"+transfer_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='transfer') + '.' + lookup('role_var', '_web_domain', role='transfer')+ if (lookup('role_var', '_web_subdomain', role='transfer') | length > 0)+ else lookup('role_var', '_web_domain', role='transfer')) }}" ################################ # DNS ################################ -transfer_dns_record: "{{ transfer_web_subdomain }}"-transfer_dns_zone: "{{ transfer_web_domain }}"-transfer_dns_proxy: "{{ dns.proxied }}"+transfer_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='transfer') }}"+transfer_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='transfer') }}"+transfer_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -transfer_traefik_sso_middleware: ""-transfer_traefik_middleware_default: "{{ traefik_default_middleware }}"-transfer_traefik_middleware_custom: ""-transfer_traefik_certresolver: "{{ traefik_default_certresolver }}"-transfer_traefik_enabled: true-transfer_traefik_api_enabled: false-transfer_traefik_api_endpoint: ""+transfer_role_traefik_sso_middleware: ""+transfer_role_traefik_middleware_default: "{{ traefik_default_middleware 
}}"+transfer_role_traefik_middleware_custom: ""+transfer_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+transfer_role_traefik_enabled: true+transfer_role_traefik_api_enabled: false+transfer_role_traefik_api_endpoint: "" ################################ # Docker ################################ # Container-transfer_docker_container: "{{ transfer_name }}"+transfer_role_docker_container: "{{ transfer_name }}" # Image-transfer_docker_image_pull: true-transfer_docker_image_tag: "latest"-transfer_docker_image: "dutchcoders/transfer.sh:{{ transfer_docker_image_tag }}"--# Ports-transfer_docker_ports_defaults: []-transfer_docker_ports_custom: []-transfer_docker_ports: "{{ transfer_docker_ports_defaults- + transfer_docker_ports_custom }}"+transfer_role_docker_image_pull: true+transfer_role_docker_image_repo: "dutchcoders/transfer.sh"+transfer_role_docker_image_tag: "latest"+transfer_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='transfer') }}:{{ lookup('role_var', '_docker_image_tag', role='transfer') }}" # Envs-transfer_docker_envs_default:+transfer_role_docker_envs_default: TZ: "{{ tz }}"- BASEDIR: "{{ transfer_uploads_location }}"+ BASEDIR: "{{ lookup('role_var', '_uploads_location', role='transfer') }}" PROVIDER: "local"- HTTP_AUTH_USER: "{{ transfer_web_user }}"- HTTP_AUTH_PASS: "{{ transfer_web_pass }}"-transfer_docker_envs_custom: {}-transfer_docker_envs: "{{ transfer_docker_envs_default- | combine(transfer_docker_envs_custom) }}"--# Commands-transfer_docker_commands_default: []-transfer_docker_commands_custom: []-transfer_docker_commands: "{{ transfer_docker_commands_default- + transfer_docker_commands_custom }}"--# Volumes-transfer_docker_volumes_default: []-transfer_docker_volumes_custom: []-transfer_docker_volumes: "{{ transfer_docker_volumes_default- + transfer_docker_volumes_custom }}"+ HTTP_AUTH_USER: "{{ lookup('role_var', '_web_user', role='transfer') }}"+ HTTP_AUTH_PASS: "{{ lookup('role_var', '_web_pass', 
role='transfer') }}"+transfer_role_docker_envs_custom: {}+transfer_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='transfer')+ | combine(lookup('role_var', '_docker_envs_custom', role='transfer')) }}" # Mounts-transfer_docker_mounts_default:+transfer_role_docker_mounts_default: - target: /tmp type: tmpfs-transfer_docker_mounts_custom: []-transfer_docker_mounts: "{{ transfer_docker_mounts_default- + transfer_docker_mounts_custom }}"--# Devices-transfer_docker_devices_default: []-transfer_docker_devices_custom: []-transfer_docker_devices: "{{ transfer_docker_devices_default- + transfer_docker_devices_custom }}"--# Hosts-transfer_docker_hosts_default: {}-transfer_docker_hosts_custom: {}-transfer_docker_hosts: "{{ docker_hosts_common- | combine(transfer_docker_hosts_default)- | combine(transfer_docker_hosts_custom) }}"--# Labels-transfer_docker_labels_default: {}-transfer_docker_labels_custom: {}-transfer_docker_labels: "{{ docker_labels_common- | combine(transfer_docker_labels_default)- | combine(transfer_docker_labels_custom) }}"+transfer_role_docker_mounts_custom: []+transfer_role_docker_mounts: "{{ lookup('role_var', '_docker_mounts_default', role='transfer')+ + lookup('role_var', '_docker_mounts_custom', role='transfer') }}" # Hostname-transfer_docker_hostname: "{{ transfer_name }}"+transfer_role_docker_hostname: "{{ transfer_name }}" # Networks-transfer_docker_networks_alias: "{{ transfer_name }}"-transfer_docker_networks_default: []-transfer_docker_networks_custom: []-transfer_docker_networks: "{{ docker_networks_common- + transfer_docker_networks_default- + transfer_docker_networks_custom }}"--# Capabilities-transfer_docker_capabilities_default: []-transfer_docker_capabilities_custom: []-transfer_docker_capabilities: "{{ transfer_docker_capabilities_default- + transfer_docker_capabilities_custom }}"--# Security Opts-transfer_docker_security_opts_default: []-transfer_docker_security_opts_custom: []-transfer_docker_security_opts: "{{ 
transfer_docker_security_opts_default- + transfer_docker_security_opts_custom }}"+transfer_role_docker_networks_alias: "{{ transfer_name }}"+transfer_role_docker_networks_default: []+transfer_role_docker_networks_custom: []+transfer_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='transfer')+ + lookup('role_var', '_docker_networks_custom', role='transfer') }}" # Restart Policy-transfer_docker_restart_policy: unless-stopped+transfer_role_docker_restart_policy: unless-stopped # State-transfer_docker_state: started+transfer_role_docker_state: started
modified
roles/transfer/tasks/main.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"
modified
roles/unionfs/defaults/main.yml
@@ -11,7 +11,7 @@ # Global ################################ -local_mount_branch: "/mnt/local=RW:"+local_mount_branch: "{{ server_local_folder_path }}=RW:" custom_mount_branch: "" # Format: "/mnt/remote/someremote=NC" @@ -31,11 +31,11 @@ mergerfs_releases_download_url: https://github.com/trapexit/mergerfs/releases/download -mergerfs_release_distribution: "{{ ansible_distribution_release | lower }}"+mergerfs_release_distribution: "{{ ansible_facts['distribution_release'] | lower }}" mergerfs_release_lookup_command: | curl -s {{ mergerfs_releases_url }} \- | jq -r ".assets[] | select(.name | test(\"{{ ansible_distribution | lower }}-{{ mergerfs_release_distribution }}_amd64\")) \+ | jq -r ".assets[] | select(.name | test(\"{{ ansible_facts['distribution'] | lower }}-{{ mergerfs_release_distribution }}_amd64\")) \ | .browser_download_url" mergerfs_download_backup_version: 2.40.2@@ -44,7 +44,7 @@ {{ mergerfs_releases_download_url }}/\ {{ mergerfs_download_backup_version }}/\ mergerfs_{{ mergerfs_download_backup_version }}.\- {{ ansible_distribution | lower }}-\+ {{ ansible_facts['distribution'] | lower }}-\ {{ mergerfs_release_distribution }}_amd64.deb" mergerfs_mount_branches: "{{ local_mount_branch }}{{ _remotes_list }}"
modified
roles/unionfs/tasks/main.yml
@@ -7,23 +7,15 @@ # GNU General Public License v3.0 # ######################################################################### ----# Variables- - name: Variables Task- ansible.builtin.import_tasks: "subtasks/variables.yml"--# Stop Docker Containers+ ansible.builtin.include_tasks: "subtasks/variables.yml" - name: Docker Containers Stop Tasks- ansible.builtin.import_tasks: "subtasks/docker/containers_stop.yml"+ ansible.builtin.include_tasks: "subtasks/docker/containers_stop.yml" when: (not containers_list is defined) -# Existing Setup- - name: Legacy Task- ansible.builtin.import_tasks: "subtasks/legacy.yml"--# Mount Mergerfs+ ansible.builtin.include_tasks: "subtasks/legacy.yml" - name: MergerFS Tasks ansible.builtin.include_tasks: "subtasks/mergerfs.yml"@@ -32,12 +24,10 @@ when: ('mounts' in ansible_run_tags) block: - name: Docker Daemon Tasks- ansible.builtin.import_tasks: "subtasks/docker/daemon.yml"-- # Start Docker Containers+ ansible.builtin.include_tasks: "subtasks/docker/daemon.yml" - name: Docker Containers Start Tasks- ansible.builtin.import_tasks: "subtasks/docker/containers_start.yml"+ ansible.builtin.include_tasks: "subtasks/docker/containers_start.yml" when: (containers_list is defined) - name: Docker Containers Start Tasks (no running containers)
modified
roles/unionfs/tasks/subtasks/docker/containers_start.yml
@@ -7,31 +7,31 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: "Docker | Containers Start | Initialize Gluetun variable"+- name: "UnionFS | Docker | Containers Start | Initialize Gluetun variable" ansible.builtin.set_fact: docker_containers_gluetun: [] -- name: "Docker | Containers Start | Identify any Gluetun containers"+- name: "UnionFS | Docker | Containers Start | Identify any Gluetun containers" ansible.builtin.set_fact: docker_containers_gluetun: "{{ docker_containers_gluetun + [item] }}" loop: "{{ containers_list.split() }}" when: item in (gluetun_instances | default(['gluetun'])) -- name: "Docker | Containers Start | Re-start all previously running Gluetun containers"+- name: "UnionFS | Docker | Containers Start | Re-start all previously running Gluetun containers" ansible.builtin.shell: "docker start {{ docker_containers_gluetun | join(' ') }}" when: (docker_containers_gluetun | length > 0) ignore_errors: true -- name: "Docker | Containers Start | Wait for {{ docker_network_container_health_delay }} seconds"+- name: "UnionFS | Docker | Containers Start | Wait for {{ docker_network_container_health_delay }} seconds" ansible.builtin.wait_for: timeout: "{{ docker_network_container_health_delay }}" -- name: "Docker | Containers Start | Start Saltbox Docker containers"+- name: "UnionFS | Docker | Containers Start | Start Saltbox Docker containers" ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/start_saltbox_docker_containers.yml" when: (remote_docker_controller_service_running is defined and remote_docker_controller_service_running) or (unionfs_docker_controller_service_running is defined and unionfs_docker_controller_service_running) -- name: "Docker | Containers Start | Start all previously running Docker containers"+- name: "UnionFS | Docker | Containers Start | Start all previously running Docker containers" ansible.builtin.shell: "docker start {{ containers_list 
}}" ignore_errors: true when: (containers_list | trim | length > 0)
modified
roles/unionfs/tasks/subtasks/docker/containers_stop.yml
@@ -17,8 +17,8 @@ - name: "UnionFS | Docker | Container Stop | Get Docker service state" ansible.builtin.set_fact:- unionfs_docker_service_running: "{{ (services['docker.service'] is defined) and (services['docker.service']['state'] == 'running') }}"- unionfs_docker_controller_service_running: "{{ (services['saltbox_managed_docker_controller.service'] is defined) and (services['saltbox_managed_docker_controller.service']['state'] == 'running') }}"+ unionfs_docker_service_running: "{{ (ansible_facts['services']['docker.service'] is defined) and (ansible_facts['services']['docker.service']['state'] == 'running') }}"+ unionfs_docker_controller_service_running: "{{ (ansible_facts['services']['saltbox_managed_docker_controller.service'] is defined) and (ansible_facts['services']['saltbox_managed_docker_controller.service']['state'] == 'running') }}" when: unionfs_docker_binary.stat.exists - name: "UnionFS | Docker | Container Stop | Tasks for when Docker exists and is running"
modified
roles/unionfs/tasks/subtasks/mergerfs.yml
@@ -7,7 +7,7 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: "Check if '{{ mergerfs_service_name }}' exists"+- name: "MergerFS | Check if '{{ mergerfs_service_name }}' exists" ansible.builtin.stat: path: "/etc/systemd/system/{{ mergerfs_service_name }}" register: saltbox_managed_mergerfs_status@@ -67,8 +67,8 @@ ansible.builtin.systemd_service: daemon_reload: true -- name: Mount Path Tasks- ansible.builtin.import_tasks: "mount_path.yml"+- name: "MergerFS | Mount Path Tasks"+ ansible.builtin.include_tasks: "mount_path.yml" - name: "MergerFS | Start '{{ mergerfs_service_name }}'" ansible.builtin.systemd_service:
modified
roles/unionfs/tasks/subtasks/mount_path.yml
@@ -45,7 +45,7 @@ when: mnt_unionfs_stat2.stat.exists - name: "Mount Path | Backup non-empty '/mnt/unionfs' path"- ansible.builtin.shell: "mv /mnt/unionfs /mnt/unionfs_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_date_time['epoch'] | int) }}"+ ansible.builtin.shell: "mv /mnt/unionfs /mnt/unionfs_{{ '%Y-%m-%d_%H.%M.%S' | strftime(ansible_facts['date_time']['epoch'] | int) }}" ignore_errors: true when: mnt_unionfs_stat2.stat.exists and (mnt_unionfs_files.matched | int > 0)
modified
roles/unpackerr/defaults/main.yml
@@ -17,98 +17,55 @@ # Paths ################################ -unpackerr_paths_folder: "{{ unpackerr_name }}"-unpackerr_paths_location: "{{ server_appdata_path }}/{{ unpackerr_paths_folder }}"-unpackerr_config_location: "{{ unpackerr_paths_location }}/unpackerr.conf"-unpackerr_paths_folders_list:- - "{{ unpackerr_paths_location }}"+unpackerr_role_paths_folder: "{{ unpackerr_name }}"+unpackerr_role_paths_location: "{{ server_appdata_path }}/{{ unpackerr_role_paths_folder }}"+unpackerr_role_paths_config_location: "{{ unpackerr_role_paths_location }}/unpackerr.conf"+unpackerr_role_paths_folders_list:+ - "{{ unpackerr_role_paths_location }}" ################################ # Docker ################################ # Container-unpackerr_docker_container: "{{ unpackerr_name }}"+unpackerr_role_docker_container: "{{ unpackerr_name }}" # Image-unpackerr_docker_image_pull: true-unpackerr_docker_image_tag: "latest"-unpackerr_docker_image: "ghcr.io/hotio/unpackerr:{{ unpackerr_docker_image_tag }}"--# Ports-unpackerr_docker_ports_defaults: []-unpackerr_docker_ports_custom: []-unpackerr_docker_ports: "{{ unpackerr_docker_ports_defaults- + unpackerr_docker_ports_custom }}"+unpackerr_role_docker_image_pull: true+unpackerr_role_docker_image_repo: "ghcr.io/hotio/unpackerr"+unpackerr_role_docker_image_tag: "latest"+unpackerr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='unpackerr') }}:{{ lookup('role_var', '_docker_image_tag', role='unpackerr') }}" # Envs-unpackerr_docker_envs_default:+unpackerr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" TZ: "{{ tz }}" UMASK: "002"-unpackerr_docker_envs_custom: {}-unpackerr_docker_envs: "{{ unpackerr_docker_envs_default- | combine(unpackerr_docker_envs_custom) }}"--# Commands-unpackerr_docker_commands_default: []-unpackerr_docker_commands_custom: []-unpackerr_docker_commands: "{{ unpackerr_docker_commands_default- + unpackerr_docker_commands_custom }}"+unpackerr_role_docker_envs_custom: 
{}+unpackerr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='unpackerr')+ | combine(lookup('role_var', '_docker_envs_custom', role='unpackerr')) }}" # Volumes-unpackerr_docker_volumes_default:- - "{{ unpackerr_paths_location }}:/config"-unpackerr_docker_volumes_custom: []-unpackerr_docker_volumes: "{{ unpackerr_docker_volumes_default- + unpackerr_docker_volumes_custom }}"--# Devices-unpackerr_docker_devices_default: []-unpackerr_docker_devices_custom: []-unpackerr_docker_devices: "{{ unpackerr_docker_devices_default- + unpackerr_docker_devices_custom }}"--# Hosts-unpackerr_docker_hosts_default: {}-unpackerr_docker_hosts_custom: {}-unpackerr_docker_hosts: "{{ docker_hosts_common- | combine(unpackerr_docker_hosts_default)- | combine(unpackerr_docker_hosts_custom) }}"--# Labels-unpackerr_docker_labels_default: {}-unpackerr_docker_labels_custom: {}-unpackerr_docker_labels: "{{ docker_labels_common- | combine(unpackerr_docker_labels_default)- | combine(unpackerr_docker_labels_custom) }}"+unpackerr_role_docker_volumes_default:+ - "{{ lookup('role_var', '_paths_location', role='unpackerr') }}:/config"+unpackerr_role_docker_volumes_custom: []+unpackerr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='unpackerr')+ + lookup('role_var', '_docker_volumes_custom', role='unpackerr') }}" # Hostname-unpackerr_docker_hostname: "{{ unpackerr_name }}"+unpackerr_role_docker_hostname: "{{ unpackerr_name }}" # Networks-unpackerr_docker_networks_alias: "{{ unpackerr_name }}"-unpackerr_docker_networks_default: []-unpackerr_docker_networks_custom: []-unpackerr_docker_networks: "{{ docker_networks_common- + unpackerr_docker_networks_default- + unpackerr_docker_networks_custom }}"--# Capabilities-unpackerr_docker_capabilities_default: []-unpackerr_docker_capabilities_custom: []-unpackerr_docker_capabilities: "{{ unpackerr_docker_capabilities_default- + unpackerr_docker_capabilities_custom }}"--# Security 
Opts-unpackerr_docker_security_opts_default: []-unpackerr_docker_security_opts_custom: []-unpackerr_docker_security_opts: "{{ unpackerr_docker_security_opts_default- + unpackerr_docker_security_opts_custom }}"+unpackerr_role_docker_networks_alias: "{{ unpackerr_name }}"+unpackerr_role_docker_networks_default: []+unpackerr_role_docker_networks_custom: []+unpackerr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='unpackerr')+ + lookup('role_var', '_docker_networks_custom', role='unpackerr') }}" # Restart Policy-unpackerr_docker_restart_policy: unless-stopped+unpackerr_role_docker_restart_policy: unless-stopped # State-unpackerr_docker_state: started+unpackerr_role_docker_state: started
modified
roles/unpackerr/tasks/main.yml
@@ -14,7 +14,7 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/directories/create_directories.yml" - name: Import Settings task- ansible.builtin.import_tasks: "subtasks/settings.yml"+ ansible.builtin.include_tasks: "subtasks/settings.yml" - name: Create Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml"
modified
roles/unpackerr/tasks/subtasks/settings.yml
@@ -7,12 +7,12 @@ # GNU General Public License v3.0 # ######################################################################### ----- name: Settings | Check if `{{ unpackerr_config_location | basename }}` exists+- name: Settings | Check if `{{ lookup('role_var', '_paths_config_location', role='unpackerr') | basename }}` exists ansible.builtin.stat:- path: "{{ unpackerr_config_location }}"+ path: "{{ lookup('role_var', '_paths_config_location', role='unpackerr') }}" register: unpackerr_config -- name: Settings | New `{{ unpackerr_config_location | basename }}` tasks+- name: Settings | New `{{ lookup('role_var', '_paths_config_location', role='unpackerr') | basename }}` tasks when: not unpackerr_config.stat.exists block: - name: Get Instance Info@@ -22,12 +22,11 @@ - sonarr - radarr - lidarr- - readarr - - name: Settings | Import default `{{ unpackerr_config_location | basename }}`+ - name: Settings | Import default `{{ lookup('role_var', '_paths_config_location', role='unpackerr') | basename }}` ansible.builtin.template: src: unpackerr.conf.j2- dest: "{{ unpackerr_config_location }}"+ dest: "{{ lookup('role_var', '_paths_config_location', role='unpackerr') }}" owner: "{{ user.name }}" group: "{{ user.name }}" mode: "0664"
modified
roles/unpackerr/templates/unpackerr.conf.j2
@@ -99,7 +99,7 @@ [[sonarr]] url = "{{ sonarr_info[instance]['url'] }}" api_key = "{{ sonarr_info[instance]['api_key'] }}"- paths = ["/mnt/unionfs/downloads/torrents/rutorrent/completed"]+ paths = ["/mnt/unionfs/downloads/torrents/qbittorrent/completed"] protocols = "torrent" timeout = "10s" delete_delay = "5m"@@ -134,7 +134,7 @@ [[radarr]] url = "{{ radarr_info[instance]['url'] }}" api_key = "{{ radarr_info[instance]['api_key'] }}"- paths = ["/mnt/unionfs/downloads/torrents/rutorrent/completed"]+ paths = ["/mnt/unionfs/downloads/torrents/qbittorrent/completed"] protocols = "torrent" timeout = "10s" delete_delay = "5m"@@ -169,7 +169,7 @@ [[lidarr]] url = "{{ lidarr_info[instance]['url'] }}" api_key = "{{ lidarr_info[instance]['api_key'] }}"- paths = ["/mnt/unionfs/downloads/torrents/rutorrent/completed"]+ paths = ["/mnt/unionfs/downloads/torrents/qbittorrent/completed"] protocols = "torrent" timeout = "10s" delete_delay = "5m"@@ -198,19 +198,6 @@ ## General recommendation is: do not enable this for torrent use. ## Setting this to true deletes the entire original download folder after import. # delete_orig = false--{% for instance in readarr_info %}-{% if readarr_info[instance]['api_key'] != 'not installed' %}-[[readarr]]- url = "{{ readarr_info[instance]['url'] }}"- api_key = "{{ readarr_info[instance]['api_key'] }}"- paths = ["/mnt/unionfs/downloads/torrents/rutorrent/completed"]- protocols = "torrent"- timeout = "10s"- delete_delay = "5m"- delete_orig = false-{% endif %}-{% endfor %} ##-Folders-####################################################################### ## This application can also watch folders for things to extract. If you copy a ##
modified
roles/user/tasks/main.yml
@@ -8,4 +8,4 @@ ######################################################################### --- - name: User Account- ansible.builtin.import_tasks: "subtasks/user_account.yml"+ ansible.builtin.include_tasks: "subtasks/user_account.yml"
modified
roles/user/tasks/subtasks/user_account.yml
@@ -41,6 +41,7 @@ skeleton: /etc/skel uid: "{{ user_id | default(omit) }}" register: user_info+ no_log: true - name: Check for skeleton files in user home directory ansible.builtin.stat:@@ -72,8 +73,8 @@ - name: User Account | Set 'uid', 'gid', 'vgid' and 'rgid' ansible.builtin.set_fact:- uid: "{{ user_info.uid }}"- gid: "{{ user_info.group }}"+ uid: "{{ user_info.uid | string }}"+ gid: "{{ user_info.group | string }}" vgid: "{{ vgid_lookup.stdout }}" rgid: "{{ rgid_lookup.stdout }}"
modified
roles/whisparr/defaults/main.yml
@@ -17,7 +17,7 @@ # Settings ################################ -whisparr_external_auth: true+whisparr_role_external_auth: true ################################ # Paths@@ -33,150 +33,100 @@ # Web ################################ -whisparr_web_subdomain: "{{ whisparr_name }}"-whisparr_web_domain: "{{ user.domain }}"-whisparr_web_port: "6969"-whisparr_web_url: "{{ 'https://' + (lookup('vars', whisparr_name + '_web_subdomain', default=whisparr_web_subdomain) + '.' + lookup('vars', whisparr_name + '_web_domain', default=whisparr_web_domain)- if (lookup('vars', whisparr_name + '_web_subdomain', default=whisparr_web_subdomain) | length > 0)- else lookup('vars', whisparr_name + '_web_domain', default=whisparr_web_domain)) }}"+whisparr_role_web_subdomain: "{{ whisparr_name }}"+whisparr_role_web_domain: "{{ user.domain }}"+whisparr_role_web_port: "6969"+whisparr_role_web_url: "{{ 'https://' + (lookup('role_var', '_web_subdomain', role='whisparr') + '.' + lookup('role_var', '_web_domain', role='whisparr')+ if (lookup('role_var', '_web_subdomain', role='whisparr') | length > 0)+ else lookup('role_var', '_web_domain', role='whisparr')) }}" ################################ # DNS ################################ -whisparr_dns_record: "{{ lookup('vars', whisparr_name + '_web_subdomain', default=whisparr_web_subdomain) }}"-whisparr_dns_zone: "{{ lookup('vars', whisparr_name + '_web_domain', default=whisparr_web_domain) }}"-whisparr_dns_proxy: "{{ dns.proxied }}"+whisparr_role_dns_record: "{{ lookup('role_var', '_web_subdomain', role='whisparr') }}"+whisparr_role_dns_zone: "{{ lookup('role_var', '_web_domain', role='whisparr') }}"+whisparr_role_dns_proxy: "{{ dns_proxied }}" ################################ # Traefik ################################ -whisparr_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"-whisparr_traefik_middleware_default: "{{ traefik_default_middleware- + (',themepark-' + lookup('vars', whisparr_name + '_name', default=whisparr_name)- if 
(whisparr_themepark_enabled and global_themepark_plugin_enabled)- else '') }}"-whisparr_traefik_middleware_custom: ""-whisparr_traefik_certresolver: "{{ traefik_default_certresolver }}"-whisparr_traefik_enabled: true-whisparr_traefik_api_enabled: true-whisparr_traefik_api_endpoint: "PathPrefix(`/api`)"+whisparr_role_traefik_sso_middleware: "{{ traefik_default_sso_middleware }}"+whisparr_role_traefik_middleware_default: "{{ traefik_default_middleware+ + (',themepark-' + whisparr_name+ if (lookup('role_var', '_themepark_enabled', role='whisparr') and global_themepark_plugin_enabled)+ else '') }}"+whisparr_role_traefik_middleware_custom: ""+whisparr_role_traefik_certresolver: "{{ traefik_default_certresolver }}"+whisparr_role_traefik_enabled: true+whisparr_role_traefik_api_enabled: true+whisparr_role_traefik_api_endpoint: "PathPrefix(`/api`)" ################################-# API-################################--# default to blank-whisparr_api_key:--################################-# THEME+# Theme ################################ # Options can be found at https://github.com/themepark-dev/theme.park-whisparr_themepark_enabled: false-whisparr_themepark_app: "whisparr"-whisparr_themepark_theme: "{{ global_themepark_theme }}"-whisparr_themepark_domain: "{{ global_themepark_domain }}"-whisparr_themepark_addons: []+whisparr_role_themepark_enabled: false+whisparr_role_themepark_app: "whisparr"+whisparr_role_themepark_theme: "{{ global_themepark_theme }}"+whisparr_role_themepark_domain: "{{ global_themepark_domain }}"+whisparr_role_themepark_addons: [] ################################ # Docker ################################ # Container-whisparr_docker_container: "{{ whisparr_name }}"+whisparr_role_docker_container: "{{ whisparr_name }}" # Image-whisparr_docker_image_pull: true-whisparr_docker_image_repo: "ghcr.io/hotio/whisparr"-whisparr_docker_image_tag: "nightly"-whisparr_docker_image: "{{ lookup('vars', whisparr_name + '_docker_image_repo', 
default=whisparr_docker_image_repo)- + ':' + lookup('vars', whisparr_name + '_docker_image_tag', default=whisparr_docker_image_tag) }}"--# Ports-whisparr_docker_ports_defaults: []-whisparr_docker_ports_custom: []-whisparr_docker_ports: "{{ lookup('vars', whisparr_name + '_docker_ports_defaults', default=whisparr_docker_ports_defaults)- + lookup('vars', whisparr_name + '_docker_ports_custom', default=whisparr_docker_ports_custom) }}"+whisparr_role_docker_image_pull: true+whisparr_role_docker_image_repo: "ghcr.io/hotio/whisparr"+whisparr_role_docker_image_tag: "nightly"+whisparr_role_docker_image: "{{ lookup('role_var', '_docker_image_repo', role='whisparr') }}:{{ lookup('role_var', '_docker_image_tag', role='whisparr') }}" # Envs-whisparr_docker_envs_default:+whisparr_role_docker_envs_default: PUID: "{{ uid }}" PGID: "{{ gid }}" UMASK: "002" TZ: "{{ tz }}"-whisparr_docker_envs_custom: {}-whisparr_docker_envs: "{{ lookup('vars', whisparr_name + '_docker_envs_default', default=whisparr_docker_envs_default)- | combine(lookup('vars', whisparr_name + '_docker_envs_custom', default=whisparr_docker_envs_custom)) }}"--# Commands-whisparr_docker_commands_default: []-whisparr_docker_commands_custom: []-whisparr_docker_commands: "{{ lookup('vars', whisparr_name + '_docker_commands_default', default=whisparr_docker_commands_default)- + lookup('vars', whisparr_name + '_docker_commands_custom', default=whisparr_docker_commands_custom) }}"+whisparr_role_docker_envs_custom: {}+whisparr_role_docker_envs: "{{ lookup('role_var', '_docker_envs_default', role='whisparr')+ | combine(lookup('role_var', '_docker_envs_custom', role='whisparr')) }}" # Volumes-whisparr_docker_volumes_default:+whisparr_role_docker_volumes_default: - "{{ whisparr_paths_location }}:/config" - "{{ server_appdata_path }}/scripts:/scripts"-whisparr_docker_volumes_custom: []-whisparr_docker_volumes: "{{ lookup('vars', whisparr_name + '_docker_volumes_default', default=whisparr_docker_volumes_default)- + 
lookup('vars', whisparr_name + '_docker_volumes_custom', default=whisparr_docker_volumes_custom) }}"--# Devices-whisparr_docker_devices_default: []-whisparr_docker_devices_custom: []-whisparr_docker_devices: "{{ lookup('vars', whisparr_name + '_docker_devices_default', default=whisparr_docker_devices_default)- + lookup('vars', whisparr_name + '_docker_devices_custom', default=whisparr_docker_devices_custom) }}"--# Hosts-whisparr_docker_hosts_default: {}-whisparr_docker_hosts_custom: {}-whisparr_docker_hosts: "{{ docker_hosts_common- | combine(lookup('vars', whisparr_name + '_docker_hosts_default', default=whisparr_docker_hosts_default))- | combine(lookup('vars', whisparr_name + '_docker_hosts_custom', default=whisparr_docker_hosts_custom)) }}"+whisparr_role_docker_volumes_custom: []+whisparr_role_docker_volumes: "{{ lookup('role_var', '_docker_volumes_default', role='whisparr')+ + lookup('role_var', '_docker_volumes_custom', role='whisparr') }}" # Labels-whisparr_docker_labels_default: {}-whisparr_docker_labels_custom: {}-whisparr_docker_labels: "{{ docker_labels_common- | combine(lookup('vars', whisparr_name + '_docker_labels_default', default=whisparr_docker_labels_default))- | combine((traefik_themepark_labels- if (whisparr_themepark_enabled and global_themepark_plugin_enabled)- else {}),- lookup('vars', whisparr_name + '_docker_labels_custom', default=whisparr_docker_labels_custom)) }}"+whisparr_role_docker_labels_default: {}+whisparr_role_docker_labels_custom: {}+whisparr_role_docker_labels: "{{ lookup('role_var', '_docker_labels_default', role='whisparr')+ | combine((traefik_themepark_labels+ if (lookup('role_var', '_themepark_enabled', role='whisparr') and global_themepark_plugin_enabled)+ else {}),+ lookup('role_var', '_docker_labels_custom', role='whisparr')) }}" # Hostname-whisparr_docker_hostname: "{{ whisparr_name }}"--# Network Mode-whisparr_docker_network_mode_default: "{{ docker_networks_name_common }}"-whisparr_docker_network_mode: "{{ 
lookup('vars', whisparr_name + '_docker_network_mode_default', default=whisparr_docker_network_mode_default) }}"+whisparr_role_docker_hostname: "{{ whisparr_name }}" # Networks-whisparr_docker_networks_alias: "{{ whisparr_name }}"-whisparr_docker_networks_default: []-whisparr_docker_networks_custom: []-whisparr_docker_networks: "{{ docker_networks_common- + lookup('vars', whisparr_name + '_docker_networks_default', default=whisparr_docker_networks_default)- + lookup('vars', whisparr_name + '_docker_networks_custom', default=whisparr_docker_networks_custom) }}"--# Capabilities-whisparr_docker_capabilities_default: []-whisparr_docker_capabilities_custom: []-whisparr_docker_capabilities: "{{ lookup('vars', whisparr_name + '_docker_capabilities_default', default=whisparr_docker_capabilities_default)- + lookup('vars', whisparr_name + '_docker_capabilities_custom', default=whisparr_docker_capabilities_custom) }}"--# Security Opts-whisparr_docker_security_opts_default: []-whisparr_docker_security_opts_custom: []-whisparr_docker_security_opts: "{{ lookup('vars', whisparr_name + '_docker_security_opts_default', default=whisparr_docker_security_opts_default)- + lookup('vars', whisparr_name + '_docker_security_opts_custom', default=whisparr_docker_security_opts_custom) }}"+whisparr_role_docker_networks_alias: "{{ whisparr_name }}"+whisparr_role_docker_networks_default: []+whisparr_role_docker_networks_custom: []+whisparr_role_docker_networks: "{{ docker_networks_common+ + lookup('role_var', '_docker_networks_default', role='whisparr')+ + lookup('role_var', '_docker_networks_custom', role='whisparr') }}" # Restart Policy-whisparr_docker_restart_policy: unless-stopped+whisparr_role_docker_restart_policy: unless-stopped # State-whisparr_docker_state: started+whisparr_role_docker_state: started
modified
roles/whisparr/tasks/main2.yml
@@ -10,9 +10,9 @@ - name: Add DNS record ansible.builtin.include_tasks: "{{ resources_tasks_path }}/dns/tasker.yml" vars:- dns_record: "{{ lookup('vars', role_name + '_dns_record') }}"- dns_zone: "{{ lookup('vars', role_name + '_dns_zone') }}"- dns_proxy: "{{ lookup('vars', role_name + '_dns_proxy') }}"+ dns_record: "{{ lookup('role_var', '_dns_record') }}"+ dns_zone: "{{ lookup('role_var', '_dns_zone') }}"+ dns_proxy: "{{ lookup('role_var', '_dns_proxy') }}" - name: Remove existing Docker container ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/remove_docker_container.yml"@@ -24,5 +24,5 @@ ansible.builtin.include_tasks: "{{ resources_tasks_path }}/docker/create_docker_container.yml" - name: "Tweak Settings when SSO is enabled"- ansible.builtin.import_tasks: "subtasks/auth.yml"- when: (lookup('vars', whisparr_name + '_traefik_sso_middleware', default=whisparr_traefik_sso_middleware) | length > 0) and lookup('vars', whisparr_name + '_external_auth', default=whisparr_external_auth)+ ansible.builtin.include_tasks: "subtasks/auth.yml"+ when: (lookup('role_var', '_traefik_sso_middleware', role='whisparr') | length > 0) and lookup('role_var', '_external_auth', role='whisparr')
modified
roles/whisparr/tasks/subtasks/auth.yml
@@ -9,7 +9,7 @@ --- - name: Auth | Wait for 'config.xml' to be created ansible.builtin.wait_for:- path: "/opt/{{ whisparr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ whisparr_name }}/config.xml" state: present - name: Auth | Wait for 10 seconds@@ -18,7 +18,7 @@ - name: Auth | Lookup AuthenticationMethod value community.general.xml:- path: "/opt/{{ whisparr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ whisparr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" content: "text" register: xmlresp@@ -28,7 +28,7 @@ block: - name: Auth | Change the 'AuthenticationMethod' attribute to 'External' community.general.xml:- path: "/opt/{{ whisparr_name }}/config.xml"+ path: "{{ server_appdata_path }}/{{ whisparr_name }}/config.xml" xpath: "/Config/AuthenticationMethod" value: "External"
modified
roles/yyq/tasks/main.yml
@@ -33,7 +33,7 @@ block: - name: Install common packages ansible.builtin.apt:- state: present+ state: latest name: - curl - jq
modified
saltbox.yml
@@ -12,7 +12,6 @@ - ['hetzner_vlan.yml', 'defaults/hetzner_vlan.yml.default'] roles: - { role: user_check, tags: ['always'] }- - { role: settings, tags: ['settings'] } - { role: pre_tasks, tags: ['always', 'pre-tasks'] } - { role: sanity_check, tags: ['always', 'sanity-check'] } - { role: backup, tags: ['backup'] }@@ -47,6 +46,7 @@ - { role: portainer, tags: ['portainer'] } - { role: organizr, tags: ['organizr'] } - { role: cloudplow, tags: ['saltbox', 'feederbox', 'cloudplow', 'cloudplow-reset'], when: ['use_cloudplow'] }+ - { role: cloudplow_disable, tags: ['cloudplow-disable']} - { role: sonarr, tags: ['sonarr'] } - { role: radarr, tags: ['radarr'] } - { role: lidarr, tags: ['lidarr'] }@@ -87,11 +87,11 @@ - { role: plex, tags: ['plex', 'plex-reset-codecs', 'plex-reclaim'] } - { role: plex_db, tags: ['plex-db'] } - { role: postgres, tags: ['postgres'] }+ - { role: postgres_host, tags: ['postgres-host'] } - { role: prometheus, tags: ['prometheus'] } - { role: prowlarr, tags: ['prowlarr'] } - { role: python, tags: ['python'] } - { role: qbittorrent, tags: ['qbittorrent'] }- - { role: readarr, tags: ['readarr'] } - { role: redis, tags: ['redis'] } - { role: rutorrent, tags: ['rutorrent'] } - { role: sabnzbd, tags: ['sabnzbd'] }@@ -110,9 +110,7 @@ - { role: plex_auth_token, tags: ['plex-auth-token'] } - { role: plex_fix_futures, tags: ['plex-fix-futures'] } - { role: saltbox_mod, tags: ['saltbox-mod'] }- - { role: sub_zero, tags: ['plex-plugins-sub-zero', 'plex-plugins-sub-zero-reinstall'] } - { role: traefik_file_template, tags: ['generate-traefik-file-template'] } - { role: traefik_template, tags: ['generate-traefik-template'] }- - { role: webtools, tags: ['plex-plugins-webtools', 'plex-plugins-webtools-reinstall'] } # Reboot checker - { role: reboot, tags: ['core', 'saltbox', 'mediabox', 'feederbox', 'system'] }