Linux server.edchosting.com 4.18.0-553.79.1.lve.el7h.x86_64 #1 SMP Wed Oct 15 16:34:46 UTC 2025 x86_64
LiteSpeed
Server IP : 75.98.162.185 & Your IP : 216.73.216.163
Domains :
Cant Read [ /etc/named.conf ]
User : goons4good
Terminal
Auto Root
Create File
Create Folder
Localroot Suggester
Backdoor Destroyer
Readme
/
lib /
python3.6 /
site-packages /
salt /
states /
Delete
Unzip
Name
Size
Permission
Date
Action
__pycache__
[ DIR ]
drwxr-xr-x
2022-10-11 05:09
__init__.py
25
B
-rw-r--r--
2022-05-16 09:16
acme.py
5.08
KB
-rw-r--r--
2022-05-16 09:16
alias.py
2.49
KB
-rw-r--r--
2022-05-16 09:16
alternatives.py
6.75
KB
-rw-r--r--
2022-05-16 09:16
ansiblegate.py
7.61
KB
-rw-r--r--
2022-05-16 09:16
apache.py
3.95
KB
-rw-r--r--
2022-05-16 09:16
apache_conf.py
2.72
KB
-rw-r--r--
2022-05-16 09:16
apache_module.py
2.73
KB
-rw-r--r--
2022-05-16 09:16
apache_site.py
2.66
KB
-rw-r--r--
2022-05-16 09:16
aptpkg.py
1.42
KB
-rw-r--r--
2022-05-16 09:16
archive.py
67.76
KB
-rw-r--r--
2022-05-16 09:16
artifactory.py
6.84
KB
-rw-r--r--
2022-05-16 09:16
at.py
7.54
KB
-rw-r--r--
2022-05-16 09:16
augeas.py
10.57
KB
-rw-r--r--
2022-05-16 09:16
aws_sqs.py
2.59
KB
-rw-r--r--
2022-05-16 09:16
azurearm_compute.py
10.86
KB
-rw-r--r--
2022-05-16 09:16
azurearm_dns.py
25.09
KB
-rw-r--r--
2022-05-16 09:16
azurearm_network.py
87.86
KB
-rw-r--r--
2022-05-16 09:16
azurearm_resource.py
27.22
KB
-rw-r--r--
2022-05-16 09:16
beacon.py
7.58
KB
-rw-r--r--
2022-05-16 09:16
bigip.py
96.63
KB
-rw-r--r--
2022-05-16 09:16
blockdev.py
5.13
KB
-rw-r--r--
2022-05-16 09:16
boto3_elasticache.py
48.01
KB
-rw-r--r--
2022-05-16 09:16
boto3_elasticsearch.py
32.6
KB
-rw-r--r--
2022-05-16 09:16
boto3_route53.py
37.54
KB
-rw-r--r--
2022-05-16 09:16
boto3_sns.py
12.69
KB
-rw-r--r--
2022-05-16 09:16
boto_apigateway.py
82.78
KB
-rw-r--r--
2022-05-16 09:16
boto_asg.py
31.93
KB
-rw-r--r--
2022-05-16 09:16
boto_cfn.py
11.53
KB
-rw-r--r--
2022-05-16 09:16
boto_cloudfront.py
6.01
KB
-rw-r--r--
2022-05-16 09:16
boto_cloudtrail.py
13.18
KB
-rw-r--r--
2022-05-16 09:16
boto_cloudwatch_alarm.py
6.4
KB
-rw-r--r--
2022-05-16 09:16
boto_cloudwatch_event.py
12.33
KB
-rw-r--r--
2022-05-16 09:16
boto_cognitoidentity.py
13.69
KB
-rw-r--r--
2022-05-16 09:16
boto_datapipeline.py
18.5
KB
-rw-r--r--
2022-05-16 09:16
boto_dynamodb.py
29.32
KB
-rw-r--r--
2022-05-16 09:16
boto_ec2.py
71.98
KB
-rw-r--r--
2022-05-16 09:16
boto_elasticache.py
16.75
KB
-rw-r--r--
2022-05-16 09:16
boto_elasticsearch_domain.py
12.27
KB
-rw-r--r--
2022-05-16 09:16
boto_elb.py
55.1
KB
-rw-r--r--
2022-05-16 09:16
boto_elbv2.py
12.19
KB
-rw-r--r--
2022-05-16 09:16
boto_iam.py
69.16
KB
-rw-r--r--
2022-05-16 09:16
boto_iam_role.py
27.12
KB
-rw-r--r--
2022-05-16 09:16
boto_iot.py
25.33
KB
-rw-r--r--
2022-05-16 09:16
boto_kinesis.py
16.69
KB
-rw-r--r--
2022-05-16 09:16
boto_kms.py
12.11
KB
-rw-r--r--
2022-05-16 09:16
boto_lambda.py
35.52
KB
-rw-r--r--
2022-05-16 09:16
boto_lc.py
11.04
KB
-rw-r--r--
2022-05-16 09:16
boto_rds.py
26
KB
-rw-r--r--
2022-05-16 09:16
boto_route53.py
19.38
KB
-rw-r--r--
2022-05-16 09:16
boto_s3.py
9.32
KB
-rw-r--r--
2022-05-16 09:16
boto_s3_bucket.py
24.67
KB
-rw-r--r--
2022-05-16 09:16
boto_secgroup.py
32.62
KB
-rw-r--r--
2022-05-16 09:16
boto_sns.py
8.92
KB
-rw-r--r--
2022-05-16 09:16
boto_sqs.py
7.97
KB
-rw-r--r--
2022-05-16 09:16
boto_vpc.py
61.77
KB
-rw-r--r--
2022-05-16 09:16
bower.py
8.26
KB
-rw-r--r--
2022-05-16 09:16
btrfs.py
10.31
KB
-rw-r--r--
2022-05-16 09:16
cabal.py
5.73
KB
-rw-r--r--
2022-05-16 09:16
ceph.py
1.9
KB
-rw-r--r--
2022-05-16 09:16
chef.py
3.68
KB
-rw-r--r--
2022-05-16 09:16
chocolatey.py
17.33
KB
-rw-r--r--
2022-05-16 09:16
chronos_job.py
4.6
KB
-rw-r--r--
2022-05-16 09:16
cimc.py
14.32
KB
-rw-r--r--
2022-05-16 09:16
cisconso.py
3.14
KB
-rw-r--r--
2022-05-16 09:16
cloud.py
14.4
KB
-rw-r--r--
2022-05-16 09:16
cmd.py
41.24
KB
-rw-r--r--
2022-05-16 09:16
composer.py
8.38
KB
-rw-r--r--
2022-05-16 09:16
cron.py
23.39
KB
-rw-r--r--
2022-05-16 09:16
cryptdev.py
6.17
KB
-rw-r--r--
2022-05-16 09:16
csf.py
9.98
KB
-rw-r--r--
2022-05-16 09:16
cyg.py
7.05
KB
-rw-r--r--
2022-05-16 09:16
ddns.py
4.2
KB
-rw-r--r--
2022-05-16 09:16
debconfmod.py
6.33
KB
-rw-r--r--
2022-05-16 09:16
dellchassis.py
24.49
KB
-rw-r--r--
2022-05-16 09:16
disk.py
6.49
KB
-rw-r--r--
2022-05-16 09:16
docker_container.py
85.27
KB
-rw-r--r--
2022-05-16 09:16
docker_image.py
16.38
KB
-rw-r--r--
2022-05-16 09:16
docker_network.py
36.34
KB
-rw-r--r--
2022-05-16 09:16
docker_volume.py
6.72
KB
-rw-r--r--
2022-05-16 09:16
drac.py
4.17
KB
-rw-r--r--
2022-05-16 09:16
dvs.py
26.29
KB
-rw-r--r--
2022-05-16 09:16
elasticsearch.py
20.38
KB
-rw-r--r--
2022-05-16 09:16
elasticsearch_index.py
3.25
KB
-rw-r--r--
2022-05-16 09:16
elasticsearch_index_template.py
3.67
KB
-rw-r--r--
2022-05-16 09:16
environ.py
5.81
KB
-rw-r--r--
2022-05-16 09:16
eselect.py
2.27
KB
-rw-r--r--
2022-05-16 09:16
esxcluster.py
21.16
KB
-rw-r--r--
2022-05-16 09:16
esxdatacenter.py
3.24
KB
-rw-r--r--
2022-05-16 09:16
esxi.py
61.77
KB
-rw-r--r--
2022-05-16 09:16
esxvm.py
18.86
KB
-rw-r--r--
2022-05-16 09:16
etcd_mod.py
8.36
KB
-rw-r--r--
2022-05-16 09:16
ethtool.py
7.84
KB
-rw-r--r--
2022-05-16 09:16
event.py
2.48
KB
-rw-r--r--
2022-05-16 09:16
file.py
299.79
KB
-rw-r--r--
2022-05-16 09:16
firewall.py
1.33
KB
-rw-r--r--
2022-05-16 09:16
firewalld.py
26.08
KB
-rw-r--r--
2022-05-16 09:16
gem.py
7.13
KB
-rw-r--r--
2022-05-16 09:16
git.py
124.23
KB
-rw-r--r--
2022-05-16 09:16
github.py
27.25
KB
-rw-r--r--
2022-05-16 09:16
glance_image.py
2.26
KB
-rw-r--r--
2022-05-16 09:16
glassfish.py
21.47
KB
-rw-r--r--
2022-05-16 09:16
glusterfs.py
12.12
KB
-rw-r--r--
2022-05-16 09:16
gnomedesktop.py
7.47
KB
-rw-r--r--
2022-05-16 09:16
gpg.py
5.28
KB
-rw-r--r--
2022-05-16 09:16
grafana.py
12.11
KB
-rw-r--r--
2022-05-16 09:16
grafana4_dashboard.py
17.31
KB
-rw-r--r--
2022-05-16 09:16
grafana4_datasource.py
6.15
KB
-rw-r--r--
2022-05-16 09:16
grafana4_org.py
7.73
KB
-rw-r--r--
2022-05-16 09:16
grafana4_user.py
5.52
KB
-rw-r--r--
2022-05-16 09:16
grafana_dashboard.py
17.74
KB
-rw-r--r--
2022-05-16 09:16
grafana_datasource.py
5.31
KB
-rw-r--r--
2022-05-16 09:16
grains.py
15.57
KB
-rw-r--r--
2022-05-16 09:16
group.py
9.57
KB
-rw-r--r--
2022-05-16 09:16
heat.py
9.69
KB
-rw-r--r--
2022-05-16 09:16
helm.py
10.39
KB
-rw-r--r--
2022-05-16 09:16
hg.py
6.33
KB
-rw-r--r--
2022-05-16 09:16
highstate_doc.py
1.41
KB
-rw-r--r--
2022-05-16 09:16
host.py
8.64
KB
-rw-r--r--
2022-05-16 09:16
http.py
7.43
KB
-rw-r--r--
2022-05-16 09:16
icinga2.py
9.07
KB
-rw-r--r--
2022-05-16 09:16
idem.py
3.91
KB
-rw-r--r--
2022-05-16 09:16
ifttt.py
2.12
KB
-rw-r--r--
2022-05-16 09:16
incron.py
5.71
KB
-rw-r--r--
2022-05-16 09:16
influxdb08_database.py
2.85
KB
-rw-r--r--
2022-05-16 09:16
influxdb08_user.py
3.39
KB
-rw-r--r--
2022-05-16 09:16
influxdb_continuous_query.py
2.81
KB
-rw-r--r--
2022-05-16 09:16
influxdb_database.py
2.11
KB
-rw-r--r--
2022-05-16 09:16
influxdb_retention_policy.py
4.82
KB
-rw-r--r--
2022-05-16 09:16
influxdb_user.py
4.84
KB
-rw-r--r--
2022-05-16 09:16
infoblox_a.py
4.24
KB
-rw-r--r--
2022-05-16 09:16
infoblox_cname.py
4.19
KB
-rw-r--r--
2022-05-16 09:16
infoblox_host_record.py
6.59
KB
-rw-r--r--
2022-05-16 09:16
infoblox_range.py
6.85
KB
-rw-r--r--
2022-05-16 09:16
ini_manage.py
12.67
KB
-rw-r--r--
2022-05-16 09:16
ipmi.py
8.39
KB
-rw-r--r--
2022-05-16 09:16
ipset.py
9.66
KB
-rw-r--r--
2022-05-16 09:16
iptables.py
25.96
KB
-rw-r--r--
2022-05-16 09:16
jboss7.py
23.95
KB
-rw-r--r--
2022-05-16 09:16
jenkins.py
3.36
KB
-rw-r--r--
2022-05-16 09:16
junos.py
17.78
KB
-rw-r--r--
2022-05-16 09:16
kapacitor.py
6.46
KB
-rw-r--r--
2022-05-16 09:16
kernelpkg.py
6.42
KB
-rw-r--r--
2022-05-16 09:16
keyboard.py
2.01
KB
-rw-r--r--
2022-05-16 09:16
keystone.py
27.12
KB
-rw-r--r--
2022-05-16 09:16
keystone_domain.py
2.81
KB
-rw-r--r--
2022-05-16 09:16
keystone_endpoint.py
4.69
KB
-rw-r--r--
2022-05-16 09:16
keystone_group.py
3.25
KB
-rw-r--r--
2022-05-16 09:16
keystone_project.py
3.36
KB
-rw-r--r--
2022-05-16 09:16
keystone_role.py
2.33
KB
-rw-r--r--
2022-05-16 09:16
keystone_role_grant.py
4.08
KB
-rw-r--r--
2022-05-16 09:16
keystone_service.py
2.89
KB
-rw-r--r--
2022-05-16 09:16
keystone_user.py
3.47
KB
-rw-r--r--
2022-05-16 09:16
keystore.py
5.29
KB
-rw-r--r--
2022-05-16 09:16
kmod.py
8.38
KB
-rw-r--r--
2022-05-16 09:16
kubernetes.py
24.87
KB
-rw-r--r--
2022-05-16 09:16
layman.py
2.44
KB
-rw-r--r--
2022-05-16 09:16
ldap.py
19.78
KB
-rw-r--r--
2022-05-16 09:16
libcloud_dns.py
5.7
KB
-rw-r--r--
2022-05-16 09:16
libcloud_loadbalancer.py
5.66
KB
-rw-r--r--
2022-05-16 09:16
libcloud_storage.py
5.13
KB
-rw-r--r--
2022-05-16 09:16
linux_acl.py
24.43
KB
-rw-r--r--
2022-05-16 09:16
locale.py
2.52
KB
-rw-r--r--
2022-05-16 09:16
logadm.py
4.73
KB
-rw-r--r--
2022-05-16 09:16
logrotate.py
3.86
KB
-rw-r--r--
2022-05-16 09:16
loop.py
7.74
KB
-rw-r--r--
2022-05-16 09:16
lvm.py
13.33
KB
-rw-r--r--
2022-05-16 09:16
lvs_server.py
6.28
KB
-rw-r--r--
2022-05-16 09:16
lvs_service.py
4.38
KB
-rw-r--r--
2022-05-16 09:16
lxc.py
22.17
KB
-rw-r--r--
2022-05-16 09:16
lxd.py
7.88
KB
-rw-r--r--
2022-05-16 09:16
lxd_container.py
22.25
KB
-rw-r--r--
2022-05-16 09:16
lxd_image.py
10.59
KB
-rw-r--r--
2022-05-16 09:16
lxd_profile.py
7.11
KB
-rw-r--r--
2022-05-16 09:16
mac_assistive.py
1.59
KB
-rw-r--r--
2022-05-16 09:16
mac_keychain.py
5.59
KB
-rw-r--r--
2022-05-16 09:16
mac_xattr.py
3.15
KB
-rw-r--r--
2022-05-16 09:16
macdefaults.py
2.65
KB
-rw-r--r--
2022-05-16 09:16
macpackage.py
6.76
KB
-rw-r--r--
2022-05-16 09:16
makeconf.py
6.87
KB
-rw-r--r--
2022-05-16 09:16
marathon_app.py
4.45
KB
-rw-r--r--
2022-05-16 09:16
mdadm_raid.py
6.41
KB
-rw-r--r--
2022-05-16 09:16
memcached.py
3.95
KB
-rw-r--r--
2022-05-16 09:16
modjk.py
2.84
KB
-rw-r--r--
2022-05-16 09:16
modjk_worker.py
6.49
KB
-rw-r--r--
2022-05-16 09:16
module.py
17.99
KB
-rw-r--r--
2022-05-16 09:16
mongodb_database.py
1.65
KB
-rw-r--r--
2022-05-16 09:16
mongodb_user.py
6.26
KB
-rw-r--r--
2022-05-16 09:16
monit.py
2.68
KB
-rw-r--r--
2022-05-16 09:16
mount.py
49.55
KB
-rw-r--r--
2022-05-16 09:16
mssql_database.py
3
KB
-rw-r--r--
2022-05-16 09:16
mssql_login.py
3.64
KB
-rw-r--r--
2022-05-16 09:16
mssql_role.py
2.37
KB
-rw-r--r--
2022-05-16 09:16
mssql_user.py
3.51
KB
-rw-r--r--
2022-05-16 09:16
msteams.py
2.53
KB
-rw-r--r--
2022-05-16 09:16
mysql_database.py
6.05
KB
-rw-r--r--
2022-05-16 09:16
mysql_grants.py
8.49
KB
-rw-r--r--
2022-05-16 09:16
mysql_query.py
13.07
KB
-rw-r--r--
2022-05-16 09:16
mysql_user.py
9.51
KB
-rw-r--r--
2022-05-16 09:16
net_napalm_yang.py
9.15
KB
-rw-r--r--
2022-05-16 09:16
netacl.py
31.92
KB
-rw-r--r--
2022-05-16 09:16
netconfig.py
33.42
KB
-rw-r--r--
2022-05-16 09:16
netntp.py
12.48
KB
-rw-r--r--
2022-05-16 09:16
netsnmp.py
11.33
KB
-rw-r--r--
2022-05-16 09:16
netusers.py
16.1
KB
-rw-r--r--
2022-05-16 09:16
network.py
23.7
KB
-rw-r--r--
2022-05-16 09:16
neutron_network.py
3.96
KB
-rw-r--r--
2022-05-16 09:16
neutron_secgroup.py
4
KB
-rw-r--r--
2022-05-16 09:16
neutron_secgroup_rule.py
4.75
KB
-rw-r--r--
2022-05-16 09:16
neutron_subnet.py
4.29
KB
-rw-r--r--
2022-05-16 09:16
nexus.py
4.97
KB
-rw-r--r--
2022-05-16 09:16
nfs_export.py
4.92
KB
-rw-r--r--
2022-05-16 09:16
nftables.py
19.5
KB
-rw-r--r--
2022-05-16 09:16
npm.py
11.21
KB
-rw-r--r--
2022-05-16 09:16
ntp.py
2.12
KB
-rw-r--r--
2022-05-16 09:16
nxos.py
10.67
KB
-rw-r--r--
2022-05-16 09:16
nxos_upgrade.py
3.5
KB
-rw-r--r--
2022-05-16 09:16
openstack_config.py
3.26
KB
-rw-r--r--
2022-05-16 09:16
openvswitch_bridge.py
3.13
KB
-rw-r--r--
2022-05-16 09:16
openvswitch_port.py
17.25
KB
-rw-r--r--
2022-05-16 09:16
opsgenie.py
4.07
KB
-rw-r--r--
2022-05-16 09:16
pagerduty.py
1.89
KB
-rw-r--r--
2022-05-16 09:16
pagerduty_escalation_policy.py
5.42
KB
-rw-r--r--
2022-05-16 09:16
pagerduty_schedule.py
6.09
KB
-rw-r--r--
2022-05-16 09:16
pagerduty_service.py
3.93
KB
-rw-r--r--
2022-05-16 09:16
pagerduty_user.py
1.18
KB
-rw-r--r--
2022-05-16 09:16
panos.py
48.13
KB
-rw-r--r--
2022-05-16 09:16
pbm.py
20.46
KB
-rw-r--r--
2022-05-16 09:16
pcs.py
36.46
KB
-rw-r--r--
2022-05-16 09:16
pdbedit.py
3.48
KB
-rw-r--r--
2022-05-16 09:16
pecl.py
3.65
KB
-rw-r--r--
2022-05-16 09:16
pip_state.py
37.55
KB
-rw-r--r--
2022-05-16 09:16
pkg.py
127.05
KB
-rw-r--r--
2022-05-16 09:16
pkgbuild.py
11.37
KB
-rw-r--r--
2022-05-16 09:16
pkgng.py
685
B
-rw-r--r--
2022-05-16 09:16
pkgrepo.py
23.59
KB
-rw-r--r--
2022-05-16 09:16
portage_config.py
5.01
KB
-rw-r--r--
2022-05-16 09:16
ports.py
5.65
KB
-rw-r--r--
2022-05-16 09:16
postgres_cluster.py
4.19
KB
-rw-r--r--
2022-05-16 09:16
postgres_database.py
6.08
KB
-rw-r--r--
2022-05-16 09:16
postgres_extension.py
5.68
KB
-rw-r--r--
2022-05-16 09:16
postgres_group.py
8.52
KB
-rw-r--r--
2022-05-16 09:16
postgres_initdb.py
2.84
KB
-rw-r--r--
2022-05-16 09:16
postgres_language.py
3.94
KB
-rw-r--r--
2022-05-16 09:16
postgres_privileges.py
7.86
KB
-rw-r--r--
2022-05-16 09:16
postgres_schema.py
4.34
KB
-rw-r--r--
2022-05-16 09:16
postgres_tablespace.py
6.62
KB
-rw-r--r--
2022-05-16 09:16
postgres_user.py
9.49
KB
-rw-r--r--
2022-05-16 09:16
powerpath.py
2.34
KB
-rw-r--r--
2022-05-16 09:16
probes.py
15.06
KB
-rw-r--r--
2022-05-16 09:16
process.py
1.32
KB
-rw-r--r--
2022-05-16 09:16
proxy.py
4.94
KB
-rw-r--r--
2022-05-16 09:16
pushover.py
3.13
KB
-rw-r--r--
2022-05-16 09:16
pyenv.py
6.07
KB
-rw-r--r--
2022-05-16 09:16
pyrax_queues.py
2.97
KB
-rw-r--r--
2022-05-16 09:16
quota.py
1.4
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_cluster.py
1.84
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_plugin.py
2.77
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_policy.py
4.58
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_upstream.py
7.9
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_user.py
8.89
KB
-rw-r--r--
2022-05-16 09:16
rabbitmq_vhost.py
3.04
KB
-rw-r--r--
2022-05-16 09:16
rbac_solaris.py
6.67
KB
-rw-r--r--
2022-05-16 09:16
rbenv.py
7.36
KB
-rw-r--r--
2022-05-16 09:16
rdp.py
1.28
KB
-rw-r--r--
2022-05-16 09:16
redismod.py
4.76
KB
-rw-r--r--
2022-05-16 09:16
reg.py
19.22
KB
-rw-r--r--
2022-05-16 09:16
rsync.py
4.45
KB
-rw-r--r--
2022-05-16 09:16
rvm.py
6.56
KB
-rw-r--r--
2022-05-16 09:16
salt_proxy.py
1.34
KB
-rw-r--r--
2022-05-16 09:16
saltmod.py
30.88
KB
-rw-r--r--
2022-05-16 09:16
saltutil.py
8.91
KB
-rw-r--r--
2022-05-16 09:16
schedule.py
11.89
KB
-rw-r--r--
2022-05-16 09:16
selinux.py
18.61
KB
-rw-r--r--
2022-05-16 09:16
serverdensity_device.py
6.41
KB
-rw-r--r--
2022-05-16 09:16
service.py
37.06
KB
-rw-r--r--
2022-05-16 09:16
slack.py
4.98
KB
-rw-r--r--
2022-05-16 09:16
smartos.py
44.89
KB
-rw-r--r--
2022-05-16 09:16
smtp.py
2.3
KB
-rw-r--r--
2022-05-16 09:16
snapper.py
7.24
KB
-rw-r--r--
2022-05-16 09:16
solrcloud.py
4.48
KB
-rw-r--r--
2022-05-16 09:16
splunk.py
4.32
KB
-rw-r--r--
2022-05-16 09:16
splunk_search.py
3.17
KB
-rw-r--r--
2022-05-16 09:16
sqlite3.py
14.7
KB
-rw-r--r--
2022-05-16 09:16
ssh_auth.py
19.1
KB
-rw-r--r--
2022-05-16 09:16
ssh_known_hosts.py
7.87
KB
-rw-r--r--
2022-05-16 09:16
stateconf.py
494
B
-rw-r--r--
2022-05-16 09:16
status.py
2.21
KB
-rw-r--r--
2022-05-16 09:16
statuspage.py
17.29
KB
-rw-r--r--
2022-05-16 09:16
supervisord.py
10.48
KB
-rw-r--r--
2022-05-16 09:16
svn.py
8.14
KB
-rw-r--r--
2022-05-16 09:16
sysctl.py
3.82
KB
-rw-r--r--
2022-05-16 09:16
syslog_ng.py
2.97
KB
-rw-r--r--
2022-05-16 09:16
sysrc.py
2.82
KB
-rw-r--r--
2022-05-16 09:16
telemetry_alert.py
7.04
KB
-rw-r--r--
2022-05-16 09:16
test.py
13.09
KB
-rw-r--r--
2022-05-16 09:16
testinframod.py
1.35
KB
-rw-r--r--
2022-05-16 09:16
timezone.py
3.42
KB
-rw-r--r--
2022-05-16 09:16
tls.py
1.81
KB
-rw-r--r--
2022-05-16 09:16
tomcat.py
9.72
KB
-rw-r--r--
2022-05-16 09:16
trafficserver.py
8.82
KB
-rw-r--r--
2022-05-16 09:16
tuned.py
3.32
KB
-rw-r--r--
2022-05-16 09:16
uptime.py
1.87
KB
-rw-r--r--
2022-05-16 09:16
user.py
35.43
KB
-rw-r--r--
2022-05-16 09:16
vagrant.py
11.4
KB
-rw-r--r--
2022-05-16 09:16
vault.py
3.28
KB
-rw-r--r--
2022-05-16 09:16
vbox_guest.py
4.05
KB
-rw-r--r--
2022-05-16 09:16
victorops.py
3.32
KB
-rw-r--r--
2022-05-16 09:16
virt.py
80.06
KB
-rw-r--r--
2022-05-16 09:16
virtualenv_mod.py
11.21
KB
-rw-r--r--
2022-05-16 09:16
webutil.py
3.78
KB
-rw-r--r--
2022-05-16 09:16
win_certutil.py
2.88
KB
-rw-r--r--
2022-05-16 09:16
win_dacl.py
7.96
KB
-rw-r--r--
2022-05-16 09:16
win_dism.py
13.02
KB
-rw-r--r--
2022-05-16 09:16
win_dns_client.py
8.32
KB
-rw-r--r--
2022-05-16 09:16
win_firewall.py
6.87
KB
-rw-r--r--
2022-05-16 09:16
win_iis.py
31.56
KB
-rw-r--r--
2022-05-16 09:16
win_lgpo.py
25.41
KB
-rw-r--r--
2022-05-16 09:16
win_license.py
1.6
KB
-rw-r--r--
2022-05-16 09:16
win_network.py
14.18
KB
-rw-r--r--
2022-05-16 09:16
win_path.py
6.39
KB
-rw-r--r--
2022-05-16 09:16
win_pki.py
5.56
KB
-rw-r--r--
2022-05-16 09:16
win_powercfg.py
3.79
KB
-rw-r--r--
2022-05-16 09:16
win_servermanager.py
10.4
KB
-rw-r--r--
2022-05-16 09:16
win_smtp_server.py
10.01
KB
-rw-r--r--
2022-05-16 09:16
win_snmp.py
6.64
KB
-rw-r--r--
2022-05-16 09:16
win_system.py
13.78
KB
-rw-r--r--
2022-05-16 09:16
win_wua.py
14.47
KB
-rw-r--r--
2022-05-16 09:16
win_wusa.py
3.53
KB
-rw-r--r--
2022-05-16 09:16
winrepo.py
2.74
KB
-rw-r--r--
2022-05-16 09:16
wordpress.py
4.82
KB
-rw-r--r--
2022-05-16 09:16
x509.py
26.98
KB
-rw-r--r--
2022-05-16 09:16
xml.py
1.75
KB
-rw-r--r--
2022-05-16 09:16
xmpp.py
2.61
KB
-rw-r--r--
2022-05-16 09:16
zabbix_action.py
9.35
KB
-rw-r--r--
2022-05-16 09:16
zabbix_host.py
27.25
KB
-rw-r--r--
2022-05-16 09:16
zabbix_hostgroup.py
5.64
KB
-rw-r--r--
2022-05-16 09:16
zabbix_mediatype.py
16.89
KB
-rw-r--r--
2022-05-16 09:16
zabbix_template.py
35.14
KB
-rw-r--r--
2022-05-16 09:16
zabbix_user.py
15.76
KB
-rw-r--r--
2022-05-16 09:16
zabbix_usergroup.py
9.64
KB
-rw-r--r--
2022-05-16 09:16
zabbix_usermacro.py
9.69
KB
-rw-r--r--
2022-05-16 09:16
zabbix_valuemap.py
8.11
KB
-rw-r--r--
2022-05-16 09:16
zcbuildout.py
5.16
KB
-rw-r--r--
2022-05-16 09:16
zenoss.py
2.89
KB
-rw-r--r--
2022-05-16 09:16
zfs.py
34.27
KB
-rw-r--r--
2022-05-16 09:16
zk_concurrency.py
5.81
KB
-rw-r--r--
2022-05-16 09:16
zone.py
46.49
KB
-rw-r--r--
2022-05-16 09:16
zookeeper.py
11.53
KB
-rw-r--r--
2022-05-16 09:16
zpool.py
13.08
KB
-rw-r--r--
2022-05-16 09:16
Save
Rename
""" States for managing zfs datasets :maintainer: Jorge Schrauwen <sjorge@blackdot.be> :maturity: new :depends: salt.utils.zfs, salt.modules.zfs :platform: smartos, illumos, solaris, freebsd, linux .. versionadded:: 2016.3.0 .. versionchanged:: 2018.3.1 Big refactor to remove duplicate code, better type conversions and improved consistency in output. .. code-block:: yaml test/shares/yuki: zfs.filesystem_present: - create_parent: true - properties: quota: 16G test/iscsi/haruhi: zfs.volume_present: - create_parent: true - volume_size: 16M - sparse: true - properties: readonly: on test/shares/yuki@frozen: zfs.snapshot_present moka_origin: zfs.hold_present: - snapshot: test/shares/yuki@frozen test/shares/moka: zfs.filesystem_present: - cloned_from: test/shares/yuki@frozen test/shares/moka@tsukune: zfs.snapshot_absent """ import logging from datetime import datetime from salt.utils.odict import OrderedDict log = logging.getLogger(__name__) # Define the state's virtual name __virtualname__ = "zfs" # Compare modifiers for zfs.schedule_snapshot comp_hour = {"minute": 0} comp_day = {"minute": 0, "hour": 0} comp_month = {"minute": 0, "hour": 0, "day": 1} comp_year = {"minute": 0, "hour": 0, "day": 1, "month": 1} def __virtual__(): """ Provides zfs state """ if not __grains__.get("zfs_support"): return False, "The zfs state cannot be loaded: zfs not supported" return __virtualname__ def _absent(name, dataset_type, force=False, recursive=False): """ internal shared function for *_absent name : string name of dataset dataset_type : string [filesystem, volume, snapshot, or bookmark] type of dataset to remove force : boolean try harder to destroy the dataset recursive : boolean also destroy all the child datasets """ ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## log configuration dataset_type = dataset_type.lower() log.debug("zfs.%s_absent::%s::config::force = %s", dataset_type, name, force) log.debug( "zfs.%s_absent::%s::config::recursive = %s", 
dataset_type, name, recursive ) ## destroy dataset if needed if __salt__["zfs.exists"](name, **{"type": dataset_type}): ## NOTE: dataset found with the name and dataset_type if not __opts__["test"]: mod_res = __salt__["zfs.destroy"]( name, **{"force": force, "recursive": recursive} ) else: mod_res = OrderedDict([("destroyed", True)]) ret["result"] = mod_res["destroyed"] if ret["result"]: ret["changes"][name] = "destroyed" ret["comment"] = "{} {} was destroyed".format( dataset_type, name, ) else: ret["comment"] = "failed to destroy {} {}".format( dataset_type, name, ) if "error" in mod_res: ret["comment"] = mod_res["error"] else: ## NOTE: no dataset found with name of the dataset_type ret["comment"] = "{} {} is absent".format(dataset_type, name) return ret def filesystem_absent(name, force=False, recursive=False): """ ensure filesystem is absent on the system name : string name of filesystem force : boolean try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) .. warning:: If a volume with ``name`` exists, this state will succeed without destroying the volume specified by ``name``. This module is dataset type sensitive. """ if not __utils__["zfs.is_dataset"](name): ret = { "name": name, "changes": {}, "result": False, "comment": "invalid dataset name: {}".format(name), } else: ret = _absent(name, "filesystem", force, recursive) return ret def volume_absent(name, force=False, recursive=False): """ ensure volume is absent on the system name : string name of volume force : boolean try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) .. warning:: If a filesystem with ``name`` exists, this state will succeed without destroying the filesystem specified by ``name``. This module is dataset type sensitive. 
""" if not __utils__["zfs.is_dataset"](name): ret = { "name": name, "changes": {}, "result": False, "comment": "invalid dataset name: {}".format(name), } else: ret = _absent(name, "volume", force, recursive) return ret def snapshot_absent(name, force=False, recursive=False): """ ensure snapshot is absent on the system name : string name of snapshot force : boolean try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) """ if not __utils__["zfs.is_snapshot"](name): ret = { "name": name, "changes": {}, "result": False, "comment": "invalid snapshot name: {}".format(name), } else: ret = _absent(name, "snapshot", force, recursive) return ret def bookmark_absent(name, force=False, recursive=False): """ ensure bookmark is absent on the system name : string name of snapshot force : boolean try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) """ if not __utils__["zfs.is_bookmark"](name): ret = { "name": name, "changes": {}, "result": False, "comment": "invalid bookmark name: {}".format(name), } else: ret = _absent(name, "bookmark", force, recursive) return ret def hold_absent(name, snapshot, recursive=False): """ ensure hold is absent on the system name : string name of hold snapshot : string name of snapshot recursive : boolean recursively releases a hold with the given tag on the snapshots of all descendent file systems. 
""" ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## log configuration log.debug("zfs.hold_absent::%s::config::snapshot = %s", name, snapshot) log.debug("zfs.hold_absent::%s::config::recursive = %s", name, recursive) ## check we have a snapshot/tag name if not __utils__["zfs.is_snapshot"](snapshot): ret["result"] = False ret["comment"] = "invalid snapshot name: {}".format(snapshot) return ret if ( __utils__["zfs.is_snapshot"](name) or __utils__["zfs.is_bookmark"](name) or name == "error" ): ret["result"] = False ret["comment"] = "invalid tag name: {}".format(name) return ret ## release hold if required holds = __salt__["zfs.holds"](snapshot) if name in holds: ## NOTE: hold found for snapshot, release it if not __opts__["test"]: mod_res = __salt__["zfs.release"]( name, snapshot, **{"recursive": recursive} ) else: mod_res = OrderedDict([("released", True)]) ret["result"] = mod_res["released"] if ret["result"]: ret["changes"] = {snapshot: {name: "released"}} ret["comment"] = "hold {} released".format( name, ) else: ret["comment"] = "failed to release hold {}".format( name, ) if "error" in mod_res: ret["comment"] = mod_res["error"] elif "error" in holds: ## NOTE: we have an error ret["result"] = False ret["comment"] = holds["error"] else: ## NOTE: no hold found with name for snapshot ret["comment"] = "hold {} is absent".format( name, ) return ret def hold_present(name, snapshot, recursive=False): """ ensure hold is present on the system name : string name of holdt snapshot : string name of snapshot recursive : boolean recursively add hold with the given tag on the snapshots of all descendent file systems. 
""" ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## log configuration log.debug("zfs.hold_present::%s::config::snapshot = %s", name, snapshot) log.debug("zfs.hold_present::%s::config::recursive = %s", name, recursive) ## check we have a snapshot/tag name if not __utils__["zfs.is_snapshot"](snapshot): ret["result"] = False ret["comment"] = "invalid snapshot name: {}".format(snapshot) return ret if ( __utils__["zfs.is_snapshot"](name) or __utils__["zfs.is_bookmark"](name) or name == "error" ): ret["result"] = False ret["comment"] = "invalid tag name: {}".format(name) return ret ## place hold if required holds = __salt__["zfs.holds"](snapshot) if name in holds: ## NOTE: hold with name already exists for snapshot ret["comment"] = "hold {} is present for {}".format( name, snapshot, ) else: ## NOTE: no hold found with name for snapshot if not __opts__["test"]: mod_res = __salt__["zfs.hold"](name, snapshot, **{"recursive": recursive}) else: mod_res = OrderedDict([("held", True)]) ret["result"] = mod_res["held"] if ret["result"]: ret["changes"] = OrderedDict([(snapshot, OrderedDict([(name, "held")]))]) ret["comment"] = "hold {} added to {}".format(name, snapshot) else: ret["comment"] = "failed to add hold {} to {}".format(name, snapshot) if "error" in mod_res: ret["comment"] = mod_res["error"] return ret def _dataset_present( dataset_type, name, volume_size=None, sparse=False, create_parent=False, properties=None, cloned_from=None, ): """ internal handler for filesystem_present/volume_present dataset_type : string volume or filesystem name : string name of volume volume_size : string size of volume sparse : boolean create sparse volume create_parent : boolean creates all the non-existing parent datasets. any property specified on the command line using the -o option is ignored. cloned_from : string name of snapshot to clone properties : dict additional zfs properties (-o) .. 
note:: ``cloned_from`` is only use if the volume does not exist yet, when ``cloned_from`` is set after the volume exists it will be ignored. .. note:: Properties do not get cloned, if you specify the properties in the state file they will be applied on a subsequent run. ``volume_size`` is considered a property, so the volume's size will be corrected when the properties get updated if it differs from the original volume. The sparse parameter is ignored when using ``cloned_from``. """ ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## fallback dataset_type to filesystem if out of range if dataset_type not in ["filesystem", "volume"]: dataset_type = "filesystem" ## ensure properties are zfs values if volume_size: volume_size = __utils__["zfs.from_size"](volume_size) if properties: properties = __utils__["zfs.from_auto_dict"](properties) elif properties is None: properties = {} ## log configuration log.debug( "zfs.%s_present::%s::config::volume_size = %s", dataset_type, name, volume_size ) log.debug("zfs.%s_present::%s::config::sparse = %s", dataset_type, name, sparse) log.debug( "zfs.%s_present::%s::config::create_parent = %s", dataset_type, name, create_parent, ) log.debug( "zfs.%s_present::%s::config::cloned_from = %s", dataset_type, name, cloned_from ) log.debug( "zfs.%s_present::%s::config::properties = %s", dataset_type, name, properties ) ## check we have valid filesystem name/volume name/clone snapshot if not __utils__["zfs.is_dataset"](name): ret["result"] = False ret["comment"] = "invalid dataset name: {}".format(name) return ret if cloned_from and not __utils__["zfs.is_snapshot"](cloned_from): ret["result"] = False ret["comment"] = "{} is not a snapshot".format(cloned_from) return ret ## ensure dataset is in correct state ## NOTE: update the dataset if __salt__["zfs.exists"](name, **{"type": dataset_type}): ## NOTE: fetch current volume properties properties_current = __salt__["zfs.get"]( name, type=dataset_type, fields="value", depth=0, 
parsable=True, ).get(name, OrderedDict()) ## NOTE: add volsize to properties if volume_size: properties["volsize"] = volume_size ## NOTE: build list of properties to update properties_update = [] for prop in properties: ## NOTE: skip unexisting properties if prop not in properties_current: log.warning( "zfs.%s_present::%s::update - unknown property: %s", dataset_type, name, prop, ) continue ## NOTE: compare current and wanted value if properties_current[prop]["value"] != properties[prop]: properties_update.append(prop) ## NOTE: update pool properties for prop in properties_update: if not __opts__["test"]: mod_res = __salt__["zfs.set"](name, **{prop: properties[prop]}) else: mod_res = OrderedDict([("set", True)]) if mod_res["set"]: if name not in ret["changes"]: ret["changes"][name] = {} ret["changes"][name][prop] = properties[prop] else: ret["result"] = False if ret["comment"] == "": ret["comment"] = "The following properties were not updated:" ret["comment"] = "{} {}".format(ret["comment"], prop) ## NOTE: update comment if ret["result"] and name in ret["changes"]: ret["comment"] = "{} {} was updated".format(dataset_type, name) elif ret["result"]: ret["comment"] = "{} {} is uptodate".format(dataset_type, name) else: ret["comment"] = "{} {} failed to be updated".format(dataset_type, name) ## NOTE: create or clone the dataset else: mod_res_action = "cloned" if cloned_from else "created" if __opts__["test"]: ## NOTE: pretend to create/clone mod_res = OrderedDict([(mod_res_action, True)]) elif cloned_from: ## NOTE: add volsize to properties if volume_size: properties["volsize"] = volume_size ## NOTE: clone the dataset mod_res = __salt__["zfs.clone"]( cloned_from, name, **{"create_parent": create_parent, "properties": properties} ) else: ## NOTE: create the dataset mod_res = __salt__["zfs.create"]( name, **{ "create_parent": create_parent, "properties": properties, "volume_size": volume_size, "sparse": sparse, } ) ret["result"] = mod_res[mod_res_action] if 
ret["result"]: ret["changes"][name] = mod_res_action if properties: ret["changes"][name] = properties ret["comment"] = "{} {} was {}".format( dataset_type, name, mod_res_action, ) else: ret["comment"] = "failed to {} {} {}".format( mod_res_action[:-1], dataset_type, name, ) if "error" in mod_res: ret["comment"] = mod_res["error"] return ret def filesystem_present(name, create_parent=False, properties=None, cloned_from=None): """ ensure filesystem exists and has properties set name : string name of filesystem create_parent : boolean creates all the non-existing parent datasets. any property specified on the command line using the -o option is ignored. cloned_from : string name of snapshot to clone properties : dict additional zfs properties (-o) .. note:: ``cloned_from`` is only use if the filesystem does not exist yet, when ``cloned_from`` is set after the filesystem exists it will be ignored. .. note:: Properties do not get cloned, if you specify the properties in the state file they will be applied on a subsequent run. """ return _dataset_present( "filesystem", name, create_parent=create_parent, properties=properties, cloned_from=cloned_from, ) def volume_present( name, volume_size, sparse=False, create_parent=False, properties=None, cloned_from=None, ): """ ensure volume exists and has properties set name : string name of volume volume_size : string size of volume sparse : boolean create sparse volume create_parent : boolean creates all the non-existing parent datasets. any property specified on the command line using the -o option is ignored. cloned_from : string name of snapshot to clone properties : dict additional zfs properties (-o) .. note:: ``cloned_from`` is only use if the volume does not exist yet, when ``cloned_from`` is set after the volume exists it will be ignored. .. note:: Properties do not get cloned, if you specify the properties in the state file they will be applied on a subsequent run. 
``volume_size`` is considered a property, so the volume's size will be corrected when the properties get updated if it differs from the original volume. The sparse parameter is ignored when using ``cloned_from``. """ return _dataset_present( "volume", name, volume_size, sparse=sparse, create_parent=create_parent, properties=properties, cloned_from=cloned_from, ) def bookmark_present(name, snapshot): """ ensure bookmark exists name : string name of bookmark snapshot : string name of snapshot """ ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## log configuration log.debug("zfs.bookmark_present::%s::config::snapshot = %s", name, snapshot) ## check we have valid snapshot/bookmark name if not __utils__["zfs.is_snapshot"](snapshot): ret["result"] = False ret["comment"] = "invalid snapshot name: {}".format(name) return ret if "#" not in name and "/" not in name: ## NOTE: simple snapshot name # take the snapshot name and replace the snapshot but with the simple name # e.g. pool/fs@snap + bm --> pool/fs#bm name = "{}#{}".format(snapshot[: snapshot.index("@")], name) ret["name"] = name if not __utils__["zfs.is_bookmark"](name): ret["result"] = False ret["comment"] = "invalid bookmark name: {}".format(name) return ret ## ensure bookmark exists if not __salt__["zfs.exists"](name, **{"type": "bookmark"}): ## NOTE: bookmark the snapshot if not __opts__["test"]: mod_res = __salt__["zfs.bookmark"](snapshot, name) else: mod_res = OrderedDict([("bookmarked", True)]) ret["result"] = mod_res["bookmarked"] if ret["result"]: ret["changes"][name] = snapshot ret["comment"] = "{} bookmarked as {}".format(snapshot, name) else: ret["comment"] = "failed to bookmark {}".format(snapshot) if "error" in mod_res: ret["comment"] = mod_res["error"] else: ## NOTE: bookmark already exists ret["comment"] = "bookmark is present" return ret def snapshot_present(name, recursive=False, properties=None): """ ensure snapshot exists and has properties set name : string name of snapshot 
    recursive : boolean
        recursively create snapshots of all descendent datasets
    properties : dict
        additional zfs properties (-o)

    .. note::
        Properties are only set at creation time

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    log.debug("zfs.snapshot_present::%s::config::recursive = %s", name, recursive)
    log.debug("zfs.snapshot_present::%s::config::properties = %s", name, properties)

    ## ensure properties are zfs values
    if properties:
        properties = __utils__["zfs.from_auto_dict"](properties)

    ## check we have valid snapshot name
    if not __utils__["zfs.is_snapshot"](name):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(name)
        return ret

    ## ensure snapshot exists
    if not __salt__["zfs.exists"](name, **{"type": "snapshot"}):
        ## NOTE: create the snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.snapshot"](
                name, **{"recursive": recursive, "properties": properties}
            )
        else:
            mod_res = OrderedDict([("snapshotted", True)])

        ret["result"] = mod_res["snapshotted"]
        if ret["result"]:
            ## NOTE: properties (when given) replace the plain action string in changes
            ret["changes"][name] = "snapshotted"
            if properties:
                ret["changes"][name] = properties
            ret["comment"] = "snapshot {} was created".format(name)
        else:
            ret["comment"] = "failed to create snapshot {}".format(name)
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]
    else:
        ## NOTE: snapshot already exists
        ret["comment"] = "snapshot is present"

    return ret


def promoted(name):
    """
    ensure a dataset is not a clone

    name : string
        name of fileset or volume

    .. warning::

        only one dataset can be the origin,
        if you promote a clone the original will now point to the promoted dataset

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## check if we have a valid dataset name
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)
        return ret

    ## ensure dataset is the primary instance
    if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}):
        ## NOTE: we don't have a dataset
        ret["result"] = False
        ret["comment"] = "dataset {} does not exist".format(name)
    else:
        ## NOTE: check if we have a blank origin (-), meaning it is not a clone
        if (
            __salt__["zfs.get"](
                name, **{"properties": "origin", "fields": "value", "parsable": True}
            )[name]["origin"]["value"]
            == "-"
        ):
            ## NOTE: we're already promoted
            ret["comment"] = "{} already promoted".format(name)
        else:
            ## NOTE: promote dataset
            if not __opts__["test"]:
                mod_res = __salt__["zfs.promote"](name)
            else:
                mod_res = OrderedDict([("promoted", True)])

            ret["result"] = mod_res["promoted"]
            if ret["result"]:
                ret["changes"][name] = "promoted"
                ret["comment"] = "{} promoted".format(name)
            else:
                ret["comment"] = "failed to promote {}".format(name)
                if "error" in mod_res:
                    ret["comment"] = mod_res["error"]

    return ret


def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
    """
    Update snapshots dict with current snapshots

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration

    """
    ## NOTE: retrieve all snapshots for the dataset (sorted, so oldest first)
    for snap in sorted(
        __salt__["zfs.list"](
            dataset, **{"recursive": True, "depth": 1, "type": "snapshot"}
        ).keys()
    ):
        ## NOTE: we only want the actual name
        ##       myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
        snap_name = snap[snap.index("@") + 1 :]

        ## NOTE: we only want snapshots matching our prefix
        if not snap_name.startswith("{}-".format(prefix)):
            continue

        ## NOTE: retrieve the holds for this snapshot
        snap_holds = __salt__["zfs.holds"](snap)

        ## NOTE: this snapshot has no holds, eligible for pruning
        if not snap_holds:
            snapshots["_prunable"].append(snap)

        ## NOTE: update snapshots based on holds (if any)
        ##       we are only interested in the ones from our schedule
        ##       if we find any others we skip them
        for hold in snap_holds:
            if hold in snapshots["_schedule"].keys():
                snapshots[hold].append(snap)

    return snapshots


def _schedule_snapshot_prepare(dataset, prefix, snapshots):
    """
    Update snapshots dict with info for a new snapshot

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g.
'test' will result in snapshots being named 'test-yyyymmdd_hhmm' snapshots : OrderedDict preseeded OrderedDict with configuration """ ## NOTE: generate new snapshot name snapshot_create_name = "{dataset}@{prefix}-{timestamp}".format( dataset=dataset, prefix=prefix, timestamp=datetime.now().strftime("%Y%m%d_%H%M%S"), ) ## NOTE: figure out if we need to create the snapshot timestamp_now = datetime.now().replace(second=0, microsecond=0) snapshots["_create"][snapshot_create_name] = [] for hold, hold_count in snapshots["_schedule"].items(): ## NOTE: skip hold if we don't keep snapshots for it if hold_count == 0: continue ## NOTE: figure out if we need the current hold on the new snapshot if snapshots[hold]: ## NOTE: extract datetime from snapshot name timestamp = datetime.strptime( snapshots[hold][-1], "{}@{}-%Y%m%d_%H%M%S".format(dataset, prefix), ).replace(second=0, microsecond=0) ## NOTE: compare current timestamp to timestamp from snapshot if hold == "minute" and timestamp_now <= timestamp: continue elif hold == "hour" and timestamp_now.replace( **comp_hour ) <= timestamp.replace(**comp_hour): continue elif hold == "day" and timestamp_now.replace( **comp_day ) <= timestamp.replace(**comp_day): continue elif hold == "month" and timestamp_now.replace( **comp_month ) <= timestamp.replace(**comp_month): continue elif hold == "year" and timestamp_now.replace( **comp_year ) <= timestamp.replace(**comp_year): continue ## NOTE: add hold entry for snapshot snapshots["_create"][snapshot_create_name].append(hold) return snapshots def scheduled_snapshot(name, prefix, recursive=True, schedule=None): """ maintain a set of snapshots based on a schedule name : string name of filesystem or volume prefix : string prefix for the snapshots e.g. 
'test' will result in snapshots being named 'test-yyyymmdd_hhmm' recursive : boolean create snapshots for all children also schedule : dict dict holding the schedule, the following keys are available (minute, hour, day, month, and year) by default all are set to 0 the value indicated the number of snapshots of that type to keep around. .. warning:: snapshots will only be created and pruned every time the state runs. a schedule must be setup to automatically run the state. this means that if you run the state daily the hourly snapshot will only be made once per day! .. versionchanged:: 2018.3.0 switched to localtime from gmtime so times now take into account timezones. """ ret = {"name": name, "changes": {}, "result": True, "comment": ""} ## initialize defaults schedule_holds = ["minute", "hour", "day", "month", "year"] snapshots = OrderedDict( [("_create", OrderedDict()), ("_prunable", []), ("_schedule", OrderedDict())] ) ## strict configuration validation ## NOTE: we need a valid dataset if not __utils__["zfs.is_dataset"](name): ret["result"] = False ret["comment"] = "invalid dataset name: {}".format(name) if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}): ret["comment"] = "dataset {} does not exist".format(name) ret["result"] = False ## NOTE: prefix must be 4 or longer if not prefix or len(prefix) < 4: ret["comment"] = "prefix ({}) must be at least 4 long".format(prefix) ret["result"] = False ## NOTE: validate schedule total_count = 0 for hold in schedule_holds: snapshots[hold] = [] if hold not in schedule: snapshots["_schedule"][hold] = 0 elif isinstance(schedule[hold], int): snapshots["_schedule"][hold] = schedule[hold] else: ret["result"] = False ret["comment"] = "schedule value for {} is not an integer".format( hold, ) break total_count += snapshots["_schedule"][hold] if ret["result"] and total_count == 0: ret["result"] = False ret["comment"] = "schedule is not valid, you need to keep atleast 1 snapshot" ## NOTE: return if configuration is 
not valid if not ret["result"]: return ret ## retrieve existing snapshots snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots) ## prepare snapshot snapshots = _schedule_snapshot_prepare(name, prefix, snapshots) ## log configuration log.debug("zfs.scheduled_snapshot::%s::config::recursive = %s", name, recursive) log.debug("zfs.scheduled_snapshot::%s::config::prefix = %s", name, prefix) log.debug("zfs.scheduled_snapshot::%s::snapshots = %s", name, snapshots) ## create snapshot(s) for snapshot_name, snapshot_holds in snapshots["_create"].items(): ## NOTE: skip if new snapshot has no holds if not snapshot_holds: continue ## NOTE: create snapshot if not __opts__["test"]: mod_res = __salt__["zfs.snapshot"]( snapshot_name, **{"recursive": recursive} ) else: mod_res = OrderedDict([("snapshotted", True)]) if not mod_res["snapshotted"]: ret["result"] = False ret["comment"] = "error creating snapshot ({})".format(snapshot_name) else: ## NOTE: create holds (if we have a snapshot) for hold in snapshot_holds: if not __opts__["test"]: mod_res = __salt__["zfs.hold"]( hold, snapshot_name, **{"recursive": recursive} ) else: mod_res = OrderedDict([("held", True)]) if not mod_res["held"]: ret["result"] = False ret["comment"] = "error adding hold ({}) to snapshot ({})".format( hold, snapshot_name, ) break snapshots[hold].append(snapshot_name) if ret["result"]: ret["comment"] = "scheduled snapshots updated" if "created" not in ret["changes"]: ret["changes"]["created"] = [] ret["changes"]["created"].append(snapshot_name) ## prune hold(s) for hold, hold_count in snapshots["_schedule"].items(): while ret["result"] and len(snapshots[hold]) > hold_count: ## NOTE: pop oldest snapshot snapshot_name = snapshots[hold].pop(0) ## NOTE: release hold for snapshot if not __opts__["test"]: mod_res = __salt__["zfs.release"]( hold, snapshot_name, **{"recursive": recursive} ) else: mod_res = OrderedDict([("released", True)]) if not mod_res["released"]: ret["result"] = False ret["comment"] = 
"error adding hold ({}) to snapshot ({})".format( hold, snapshot_name, ) ## NOTE: mark as prunable if not __salt__["zfs.holds"](snapshot_name): snapshots["_prunable"].append(snapshot_name) ## prune snapshot(s) for snapshot_name in snapshots["_prunable"]: ## NOTE: destroy snapshot if not __opts__["test"]: mod_res = __salt__["zfs.destroy"](snapshot_name, **{"recursive": recursive}) else: mod_res = OrderedDict([("destroyed", True)]) if not mod_res["destroyed"]: ret["result"] = False ret["comment"] = "error prunding snapshot ({1})".format( snapshot_name, ) break if ret["result"] and snapshots["_prunable"]: ret["comment"] = "scheduled snapshots updated" ret["changes"]["pruned"] = snapshots["_prunable"] if ret["result"] and not ret["changes"]: ret["comment"] = "scheduled snapshots are up to date" return ret # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4