hmetzner 3 years ago
parent
commit
55d9ff526b
100 changed files with 8626 additions and 951 deletions
  1. 6 6
      .storage/auth
  2. 1 1
      .storage/core.config_entries
  3. 12 12
      .storage/core.device_registry
  4. 173 12
      .storage/core.entity_registry
  5. 523 523
      .storage/core.restore_state
  6. 414 395
      .storage/hacs.repositories
  7. 2 2
      .storage/http
  8. 11 0
      configuration.yaml
  9. 229 0
      custom_components/waste_collection_schedule/__init__.py
  10. BIN
      custom_components/waste_collection_schedule/__pycache__/__init__.cpython-310.pyc
  11. BIN
      custom_components/waste_collection_schedule/__pycache__/calendar.cpython-310.pyc
  12. BIN
      custom_components/waste_collection_schedule/__pycache__/const.cpython-310.pyc
  13. BIN
      custom_components/waste_collection_schedule/__pycache__/sensor.cpython-310.pyc
  14. 116 0
      custom_components/waste_collection_schedule/calendar.py
  15. 6 0
      custom_components/waste_collection_schedule/const.py
  16. 10 0
      custom_components/waste_collection_schedule/manifest.json
  17. 246 0
      custom_components/waste_collection_schedule/sensor.py
  18. 2 0
      custom_components/waste_collection_schedule/waste_collection_schedule/__init__.py
  19. BIN
      custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/__init__.cpython-310.pyc
  20. BIN
      custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/collection.cpython-310.pyc
  21. BIN
      custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/scraper.cpython-310.pyc
  22. 71 0
      custom_components/waste_collection_schedule/waste_collection_schedule/collection.py
  23. 305 0
      custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py
  24. 159 0
      custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py
  25. 69 0
      custom_components/waste_collection_schedule/waste_collection_schedule/service/EcoHarmonogramPL.py
  26. 71 0
      custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py
  27. 64 0
      custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS_v1.py
  28. 0 0
      custom_components/waste_collection_schedule/waste_collection_schedule/service/__init__.py
  29. BIN
      custom_components/waste_collection_schedule/waste_collection_schedule/source/__pycache__/awido_de.cpython-310.pyc
  30. 181 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/a_region_ch.py
  31. 140 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_io.py
  32. 100 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_zollernalbkreis_de.py
  33. 43 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallnavi_de.py
  34. 107 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/alw_wf_de.py
  35. 118 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/aucklandcouncil_govt_nz.py
  36. 93 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/avl_ludwigsburg_de.py
  37. 134 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
  38. 54 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_bad_kreuznach_de.py
  39. 60 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_es_de.py
  40. 62 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_lm_de.py
  41. 49 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_oldenburg_de.py
  42. 40 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awbkoeln_de.py
  43. 134 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awido_de.py
  44. 117 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awn_de.py
  45. 76 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awr_de.py
  46. 75 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/awsh_de.py
  47. 134 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/banyule_vic_gov_au.py
  48. 68 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/belmont_wa_gov_au.py
  49. 91 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py
  50. 103 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/bielefeld_de.py
  51. 109 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
  52. 96 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/bradford_gov_uk.py
  53. 127 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/brisbane_qld_gov_au.py
  54. 97 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
  55. 52 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
  56. 70 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/cambridge_gov_uk.py
  57. 111 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
  58. 84 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/canterbury_gov_uk.py
  59. 58 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py
  60. 79 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/cheshire_east_gov_uk.py
  61. 115 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/chesterfield_gov_uk.py
  62. 69 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/cochem_zell_online_de.py
  63. 82 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/colchester_gov_uk.py
  64. 64 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/cornwall_gov_uk.py
  65. 34 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py
  66. 89 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/derby_gov_uk.py
  67. 68 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/ecoharmonogram_pl.py
  68. 91 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/egn_abfallkalender_de.py
  69. 124 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/elmbridge_gov_uk.py
  70. 105 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/environmentfirst_co_uk.py
  71. 33 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/example.py
  72. 74 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py
  73. 83 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py
  74. 53 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/huntingdonshire_gov_uk.py
  75. 69 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/hvcgroep_nl.py
  76. 60 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/hygea_be.py
  77. 221 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
  78. 117 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py
  79. 118 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/ipswich_qld_gov_au.py
  80. 43 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py
  81. 62 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/kaev_niederlausitz.py
  82. 114 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
  83. 52 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/kwb_goslar_de.py
  84. 55 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_rhoen_grabfeld.py
  85. 120 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_wittmund_de.py
  86. 56 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/lerum_se.py
  87. 58 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/lindau_ch.py
  88. 37 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/lrasha_de.py
  89. 73 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/manchester_uk.py
  90. 83 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/melton_vic_gov_au.py
  91. 97 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/miljoteknik_se.py
  92. 110 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py
  93. 81 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/mrsc_vic_gov_au.py
  94. 146 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py
  95. 76 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/muenchenstein_ch.py
  96. 117 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/nawma_sa_gov_au.py
  97. 63 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/newcastle_gov_uk.py
  98. 80 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py
  99. 82 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/nsomerset_gov_uk.py
  100. 0 0
      custom_components/waste_collection_schedule/waste_collection_schedule/source/oslokommune_no.py

+ 6 - 6
.storage/auth

@@ -392,8 +392,8 @@
         "access_token_expiration": 1800.0,
         "token": "f465000553338831c38b537b3c59ad66b6cdeae835112743e89a0338cc4e12f535f33495580b85a8e58bd1d4161baefa2da701905dd9ea7157789d68d207fa3d",
         "jwt_key": "e22e09aa01a989668ea44ef555e75c757aa53fc286f8f494fb1203861ff92c4d59a312a2a0e5d5c657daadf22f894474590fba62e2f90fd293c7a513745cd9ad",
-        "last_used_at": "2022-11-12T06:20:46.179430+00:00",
-        "last_used_ip": "79.216.224.113",
+        "last_used_at": "2022-11-16T17:55:01.098090+00:00",
+        "last_used_ip": "84.178.159.56",
         "credential_id": "5a9dbf4a6efc4b699860177fffb1d50f",
         "version": "2022.8.2"
       },
@@ -536,8 +536,8 @@
         "access_token_expiration": 1800.0,
         "token": "d8b615f214cdb39e5a5e3228ae906ee2faae9d60ac12c24c601d31e9877d1feef633b6b90a25a1954d6edcad05d9974ed2777709d105fa534b43bcd5ac4351fc",
         "jwt_key": "2aab40082ac506916881563242f128d1c5ec3ad9ece654875eb8c85c4de27de79e0d13a36fbaa0e66a8f387cd34eaf753413a8bf7dc09e0f694f97b43c574c8e",
-        "last_used_at": "2022-11-15T16:33:16.706336+00:00",
-        "last_used_ip": "84.178.156.224",
+        "last_used_at": "2022-11-16T17:10:15.299919+00:00",
+        "last_used_ip": "84.178.159.56",
         "credential_id": "9f798200ec224f4cba157fdfcadc5380",
         "version": "2022.8.2"
       },
@@ -552,8 +552,8 @@
         "access_token_expiration": 1800.0,
         "token": "c2a3df2dcedddf0525d52bccb0b619dd78ece7fb1a04bf9e4b76b3613e0e64a24fe3067302046024957ed2dc1f34e35aba421ab58d9584280d8be73c143f58e7",
         "jwt_key": "84d559812b0e9f6c4bcd11621d571d709ef508e6d39e9ed3dff67a85cc019a54a51d4f09b0ce9df102312ed79a1d6bd62591d9fcaa088ab8139296252a373ced",
-        "last_used_at": "2022-09-22T20:44:52.755278+00:00",
-        "last_used_ip": "172.20.0.2",
+        "last_used_at": "2022-11-16T11:42:34.374703+00:00",
+        "last_used_ip": "84.178.159.56",
         "credential_id": "52420d6654ab41698220ce99c0906505",
         "version": "2022.8.2"
       }

+ 1 - 1
.storage/core.config_entries

@@ -229,7 +229,7 @@
         "data": {
           "app_id": "io.homeassistant.companion.android",
           "app_name": "Home Assistant",
-          "app_version": "2022.9.1-full (2700)",
+          "app_version": "2022.10.2-full (2770)",
           "device_name": "Mi 9 SE",
           "manufacturer": "Xiaomi",
           "model": "Mi 9 SE",

+ 12 - 12
.storage/core.device_registry

@@ -35,12 +35,12 @@
         "configuration_url": "http://192.168.1.139:1400/support/review",
         "connections": [
           [
-            "mac",
-            "34:7e:5c:31:fe:52"
-          ],
-          [
             "upnp",
             "uuid:RINCON_347E5C31FE5201400"
+          ],
+          [
+            "mac",
+            "34:7e:5c:31:fe:52"
           ]
         ],
         "disabled_by": null,
@@ -134,12 +134,12 @@
         "configuration_url": "http://192.168.1.33:1400/support/review",
         "connections": [
           [
-            "upnp",
-            "uuid:RINCON_B8E9378F694401400"
-          ],
-          [
             "mac",
             "b8:e9:37:8f:69:44"
+          ],
+          [
+            "upnp",
+            "uuid:RINCON_B8E9378F694401400"
           ]
         ],
         "disabled_by": null,
@@ -200,12 +200,12 @@
         "configuration_url": "http://192.168.1.155:1400/support/review",
         "connections": [
           [
-            "upnp",
-            "uuid:RINCON_542A1B58D5CA01400"
-          ],
-          [
             "mac",
             "54:2a:1b:58:d5:ca"
+          ],
+          [
+            "upnp",
+            "uuid:RINCON_542A1B58D5CA01400"
           ]
         ],
         "disabled_by": "user",

+ 173 - 12
.storage/core.entity_registry

@@ -7330,7 +7330,7 @@
         "area_id": null,
         "capabilities": null,
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "battery",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": null,
         "entity_category": "diagnostic",
@@ -7342,7 +7342,7 @@
         "name": null,
         "options": {},
         "original_device_class": "battery",
-        "original_icon": "mdi:battery-charging-50",
+        "original_icon": "mdi:battery-10",
         "original_name": "Mi 9 SE Battery Level",
         "platform": "mobile_app",
         "supported_features": 0,
@@ -7365,7 +7365,7 @@
         "name": null,
         "options": {},
         "original_device_class": null,
-        "original_icon": "mdi:battery-plus",
+        "original_icon": "mdi:battery-minus",
         "original_name": "Mi 9 SE Battery State",
         "platform": "mobile_app",
         "supported_features": 0,
@@ -7388,7 +7388,7 @@
         "name": null,
         "options": {},
         "original_device_class": "plug",
-        "original_icon": "mdi:power-plug",
+        "original_icon": "mdi:power-plug-off",
         "original_name": "Mi 9 SE Is Charging",
         "platform": "mobile_app",
         "supported_features": 0,
@@ -7411,7 +7411,7 @@
         "name": null,
         "options": {},
         "original_device_class": null,
-        "original_icon": "mdi:power-plug",
+        "original_icon": "mdi:battery",
         "original_name": "Mi 9 SE Charger Type",
         "platform": "mobile_app",
         "supported_features": 0,
@@ -7445,7 +7445,7 @@
         "area_id": null,
         "capabilities": null,
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "temperature",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": null,
         "entity_category": "diagnostic",
@@ -7723,7 +7723,7 @@
         "area_id": null,
         "capabilities": null,
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "timestamp",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": "integration",
         "entity_category": "diagnostic",
@@ -7771,7 +7771,7 @@
           "state_class": "measurement"
         },
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "illuminance",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": "integration",
         "entity_category": null,
@@ -8030,7 +8030,7 @@
           "state_class": "measurement"
         },
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "signal_strength",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": "integration",
         "entity_category": "diagnostic",
@@ -8099,7 +8099,7 @@
         "area_id": null,
         "capabilities": null,
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "timestamp",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": "integration",
         "entity_category": null,
@@ -8575,7 +8575,7 @@
         "area_id": null,
         "capabilities": null,
         "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
-        "device_class": null,
+        "device_class": "power",
         "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
         "disabled_by": null,
         "entity_category": "diagnostic",
@@ -8587,7 +8587,7 @@
         "name": null,
         "options": {},
         "original_device_class": "power",
-        "original_icon": "mdi:battery-minus",
+        "original_icon": "mdi:battery-plus",
         "original_name": "Mi 9 SE Battery Power",
         "platform": "mobile_app",
         "supported_features": 0,
@@ -9157,6 +9157,167 @@
         "supported_features": 0,
         "unique_id": "00:0d:6f:ff:fe:07:e6:d9-01-0006-duration",
         "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": "5f2e6a346ecabb7acdded5114523a6c3",
+        "device_class": null,
+        "device_id": "aa72549bafc2b92a5ad737ce6ae46a9f",
+        "disabled_by": "integration",
+        "entity_category": null,
+        "entity_id": "sensor.mi_9_se_screen_brightness",
+        "hidden_by": null,
+        "icon": null,
+        "id": "633e085da0862ee306353f01716315c8",
+        "has_entity_name": false,
+        "name": null,
+        "options": {},
+        "original_device_class": null,
+        "original_icon": "mdi:brightness-6",
+        "original_name": "Mi 9 SE Screen Brightness",
+        "platform": "mobile_app",
+        "supported_features": 0,
+        "unique_id": "67234ec04648d78af8fa7656afc1dd24d9505ab5e6da6653a19f0bc3ff9a21cc_screen_brightness",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "calendar.awido",
+        "hidden_by": null,
+        "icon": null,
+        "id": "ce1baf6a4418e65fe111217721b5a68f",
+        "has_entity_name": false,
+        "name": null,
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AWIDO",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "awido_de[('city', 'Haßloch'), ('customer', 'awb-duerkheim'), ('street', 'Nelkenweg')]_calendar",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "sensor.abfallnaechste",
+        "hidden_by": null,
+        "icon": null,
+        "id": "7bbcb0ff083a402918200eacb2112a5d",
+        "has_entity_name": false,
+        "name": "Nächster Abfall",
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AbfallNaechste",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "AbfallNaechste",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "sensor.abfallrest",
+        "hidden_by": null,
+        "icon": null,
+        "id": "cf81d0e885559103c9def2586da64834",
+        "has_entity_name": false,
+        "name": "Rest-Müll",
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AbfallRest",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "AbfallRest",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "sensor.abfallbio",
+        "hidden_by": null,
+        "icon": null,
+        "id": "6faf5e4f89b35a584650f129a981f69c",
+        "has_entity_name": false,
+        "name": "Bio-Müll",
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AbfallBio",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "AbfallBio",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "sensor.abfallgelbersack",
+        "hidden_by": null,
+        "icon": "mdi:recycle",
+        "id": "fdb4d3b3620c976d2cd51ef94d5a1aa8",
+        "has_entity_name": false,
+        "name": "Gelber Sack",
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AbfallGelberSack",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "AbfallGelberSack",
+        "unit_of_measurement": null
+      },
+      {
+        "area_id": null,
+        "capabilities": null,
+        "config_entry_id": null,
+        "device_class": null,
+        "device_id": null,
+        "disabled_by": null,
+        "entity_category": null,
+        "entity_id": "sensor.abfallpapier",
+        "hidden_by": null,
+        "icon": null,
+        "id": "213aab3124fb356b7f31d0fe460aab5c",
+        "has_entity_name": false,
+        "name": "Papier-Müll",
+        "options": {},
+        "original_device_class": null,
+        "original_icon": null,
+        "original_name": "AbfallPapier",
+        "platform": "waste_collection_schedule",
+        "supported_features": 0,
+        "unique_id": "AbfallPapier",
+        "unit_of_measurement": null
       }
     ]
   }

File diff suppressed because it is too large
+ 523 - 523
.storage/core.restore_state


File diff suppressed because it is too large
+ 414 - 395
.storage/hacs.repositories


+ 2 - 2
.storage/http

@@ -10,9 +10,9 @@
     "ip_ban_enabled": true,
     "login_attempts_threshold": 3,
     "server_port": 8123,
+    "ssl_profile": "modern",
     "cors_allowed_origins": [
       "https://cast.home-assistant.io"
-    ],
-    "ssl_profile": "modern"
+    ]
   }
 }

+ 11 - 0
configuration.yaml

@@ -34,6 +34,7 @@ group: !include groups.yaml
 automation: !include automations.yaml
 script: !include scripts.yaml
 scene: !include scenes.yaml
+sensor: !include sensor.yaml
 
 #Stop to record the history of some domains and entities
 recorder:
@@ -50,6 +51,16 @@ recorder:
       - sensor.total_energy_text
 
 # Home Assistant configuration.yaml
+#waste_collection_schedule
+waste_collection_schedule:
+  sources:
+    - name: awido_de
+      args:
+        customer: awb-duerkheim
+        city: 'Haßloch'
+        street: Nelkenweg
+  day_switch_time: "11:00"
+
 template:
   - sensor:
       - name: "Total Energy Consumption"

+ 229 - 0
custom_components/waste_collection_schedule/__init__.py

@@ -0,0 +1,229 @@
+"""Waste Collection Schedule Component."""
+import logging
+import site
+from pathlib import Path
+from random import randrange
+
+import homeassistant.helpers.config_validation as cv
+import homeassistant.util.dt as dt_util
+import voluptuous as vol
+from homeassistant.core import HomeAssistant, callback
+from homeassistant.helpers.dispatcher import dispatcher_send
+
+from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
+
+from homeassistant.helpers.event import async_call_later  # isort:skip
+from homeassistant.helpers.event import async_track_time_change  # isort:skip
+
+# add module directory to path
+package_dir = Path(__file__).resolve().parents[0]
+site.addsitedir(str(package_dir))
+from waste_collection_schedule import Customize, Scraper  # type: ignore # isort:skip # noqa: E402
+
+_LOGGER = logging.getLogger(__name__)
+
+CONF_SOURCES = "sources"
+CONF_SOURCE_NAME = "name"
+CONF_SOURCE_ARGS = "args"  # scraper-source arguments
+CONF_SOURCE_CALENDAR_TITLE = "calendar_title"
+CONF_SEPARATOR = "separator"
+CONF_FETCH_TIME = "fetch_time"
+CONF_RANDOM_FETCH_TIME_OFFSET = "random_fetch_time_offset"
+CONF_DAY_SWITCH_TIME = "day_switch_time"
+
+CONF_CUSTOMIZE = "customize"
+CONF_TYPE = "type"
+CONF_ALIAS = "alias"
+CONF_SHOW = "show"
+CONF_ICON = "icon"
+CONF_PICTURE = "picture"
+CONF_USE_DEDICATED_CALENDAR = "use_dedicated_calendar"
+CONF_DEDICATED_CALENDAR_TITLE = "dedicated_calendar_title"
+
+CUSTOMIZE_CONFIG = vol.Schema(
+    {
+        vol.Optional(CONF_TYPE): cv.string,
+        vol.Optional(CONF_ALIAS): cv.string,
+        vol.Optional(CONF_SHOW): cv.boolean,
+        vol.Optional(CONF_ICON): cv.icon,
+        vol.Optional(CONF_PICTURE): cv.string,
+        vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean,
+        vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string,
+    }
+)
+
+SOURCE_CONFIG = vol.Schema(
+    {
+        vol.Required(CONF_SOURCE_NAME): cv.string,
+        vol.Required(CONF_SOURCE_ARGS): dict,
+        vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All(
+            cv.ensure_list, [CUSTOMIZE_CONFIG]
+        ),
+        vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string,
+    }
+)
+
+CONFIG_SCHEMA = vol.Schema(
+    {
+        DOMAIN: vol.Schema(
+            {
+                vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]),
+                vol.Optional(CONF_SEPARATOR, default=", "): cv.string,
+                vol.Optional(CONF_FETCH_TIME, default="01:00"): cv.time,
+                vol.Optional(
+                    CONF_RANDOM_FETCH_TIME_OFFSET, default=60
+                ): cv.positive_int,
+                vol.Optional(CONF_DAY_SWITCH_TIME, default="10:00"): cv.time,
+            }
+        )
+    },
+    extra=vol.ALLOW_EXTRA,
+)
+
+
+async def async_setup(hass: HomeAssistant, config: dict):
+    """Set up the component. config contains data from configuration.yaml."""
+    # create empty api object as singleton
+    api = WasteCollectionApi(
+        hass,
+        separator=config[DOMAIN][CONF_SEPARATOR],
+        fetch_time=config[DOMAIN][CONF_FETCH_TIME],
+        random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET],
+        day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME],
+    )
+
+    # create scraper(s)
+    for source in config[DOMAIN][CONF_SOURCES]:
+        # create customize object
+        customize = {}
+        for c in source.get(CONF_CUSTOMIZE, {}):
+            customize[c[CONF_TYPE]] = Customize(
+                waste_type=c[CONF_TYPE],
+                alias=c.get(CONF_ALIAS),
+                show=c.get(CONF_SHOW, True),
+                icon=c.get(CONF_ICON),
+                picture=c.get(CONF_PICTURE),
+                use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False),
+                dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False),
+            )
+        api.add_scraper(
+            source_name=source[CONF_SOURCE_NAME],
+            customize=customize,
+            calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE),
+            source_args=source.get(CONF_SOURCE_ARGS, {}),
+        )
+
+    # store api object
+    hass.data.setdefault(DOMAIN, api)
+
+    # load calendar platform
+    await hass.helpers.discovery.async_load_platform(
+        "calendar", DOMAIN, {"api": api}, config
+    )
+
+    # initial fetch of all data
+    hass.add_job(api._fetch)
+
+    return True
+
+
+class WasteCollectionApi:
+    def __init__(
+        self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time
+    ):
+        self._hass = hass
+        self._scrapers = []
+        self._separator = separator
+        self._fetch_time = fetch_time
+        self._random_fetch_time_offset = random_fetch_time_offset
+        self._day_switch_time = day_switch_time
+
+        # start timer to fetch date once per day
+        async_track_time_change(
+            hass,
+            self._fetch_callback,
+            self._fetch_time.hour,
+            self._fetch_time.minute,
+            self._fetch_time.second,
+        )
+
+        # start timer for day-switch time
+        if self._day_switch_time != self._fetch_time:
+            async_track_time_change(
+                hass,
+                self._update_sensors_callback,
+                self._day_switch_time.hour,
+                self._day_switch_time.minute,
+                self._day_switch_time.second,
+            )
+
+        # add a timer at midnight (if not already there) to update days-to
+        midnight = dt_util.parse_time("00:00")
+        if midnight != self._fetch_time and midnight != self._day_switch_time:
+            async_track_time_change(
+                hass,
+                self._update_sensors_callback,
+                midnight.hour,
+                midnight.minute,
+                midnight.second,
+            )
+
+    @property
+    def separator(self):
+        """Separator string, used to separator waste types."""
+        return self._separator
+
+    @property
+    def fetch_time(self):
+        """When to fetch to data."""
+        return self._fetch_time
+
+    @property
+    def day_switch_time(self):
+        """When to hide entries for today."""
+        return self._day_switch_time
+
+    def add_scraper(
+        self,
+        source_name,
+        customize,
+        source_args,
+        calendar_title,
+    ):
+        self._scrapers.append(
+            Scraper.create(
+                source_name=source_name,
+                customize=customize,
+                source_args=source_args,
+                calendar_title=calendar_title,
+            )
+        )
+
+    def _fetch(self, *_):
+        for scraper in self._scrapers:
+            scraper.fetch()
+
+        self._update_sensors_callback()
+
+    @property
+    def scrapers(self):
+        return self._scrapers
+
+    def get_scraper(self, index):
+        return self._scrapers[index] if index < len(self._scrapers) else None
+
+    @callback
+    def _fetch_callback(self, *_):
+        async_call_later(
+            self._hass,
+            randrange(0, 60 * self._random_fetch_time_offset),
+            self._fetch_now_callback,
+        )
+
+    @callback
+    def _fetch_now_callback(self, *_):
+        self._hass.add_job(self._fetch)
+
+    @callback
+    def _update_sensors_callback(self, *_):
+        dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL)

BIN
custom_components/waste_collection_schedule/__pycache__/__init__.cpython-310.pyc


BIN
custom_components/waste_collection_schedule/__pycache__/calendar.cpython-310.pyc


BIN
custom_components/waste_collection_schedule/__pycache__/const.cpython-310.pyc


BIN
custom_components/waste_collection_schedule/__pycache__/sensor.cpython-310.pyc


+ 116 - 0
custom_components/waste_collection_schedule/calendar.py

@@ -0,0 +1,116 @@
+"""Calendar platform support for Waste Collection Schedule."""
+
+import logging
+from datetime import datetime, timedelta
+
+from homeassistant.components.calendar import CalendarEntity, CalendarEvent
+from homeassistant.core import HomeAssistant
+
+from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import Scraper
+
+_LOGGER = logging.getLogger(__name__)
+
+
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up calendar platform.

    For every scraper this creates one dedicated calendar entity per waste
    type marked with ``use_dedicated_calendar`` and, if any types remain (or
    nothing was customized at all), one global calendar entity.
    """
    # We only want this platform to be set up via discovery.
    if discovery_info is None:
        return

    entities = []

    api = discovery_info["api"]

    for scraper in api.scrapers:
        dedicated_calendar_types = scraper.get_dedicated_calendar_types()
        global_calendar_types = scraper.get_global_calendar_types()

        if dedicated_calendar_types is not None:
            # One calendar entity per dedicated waste type.
            # (loop variable renamed from `type`, which shadowed the builtin)
            for waste_type in dedicated_calendar_types:
                entities.append(
                    WasteCollectionCalendar(
                        api,
                        scraper,
                        scraper.get_calendar_title_for_type(waste_type),
                        [scraper.get_collection_type(waste_type)],
                        calc_unique_calendar_id(scraper, waste_type),
                    )
                )

        if global_calendar_types is not None or dedicated_calendar_types is None:
            # Global calendar: restricted to the non-dedicated types, or
            # covering all types (types=None) when nothing was customized.
            types = (
                [scraper.get_collection_type(t) for t in global_calendar_types]
                if global_calendar_types is not None
                else None
            )
            entities.append(
                WasteCollectionCalendar(
                    api,
                    scraper,
                    scraper.calendar_title,
                    types,
                    calc_unique_calendar_id(scraper),
                )
            )

    async_add_entities(entities)
+
+
class WasteCollectionCalendar(CalendarEntity):
    """Calendar entity showing upcoming waste collections of one scraper."""

    def __init__(self, api, scraper, name, types, unique_id: str):
        self._api = api
        self._scraper = scraper
        self._name = name
        self._types = types
        self._unique_id = unique_id
        self._attr_unique_id = unique_id

    @property
    def name(self):
        """Return entity name."""
        return self._name

    @property
    def event(self):
        """Return the next collection as a calendar event, or None if none."""
        upcoming = self._scraper.get_upcoming(
            count=1, include_today=True, types=self._types
        )
        return self._convert(upcoming[0]) if upcoming else None

    async def async_get_events(
        self, hass: HomeAssistant, start_date: datetime, end_date: datetime
    ):
        """Return all events within specified time span."""
        candidates = (
            self._convert(collection)
            for collection in self._scraper.get_upcoming(
                include_today=True, types=self._types
            )
        )
        return [
            event
            for event in candidates
            if start_date <= event.start_datetime_local <= end_date
        ]

    def _convert(self, collection) -> CalendarEvent:
        """Convert a collection into an all-day Home Assistant calendar event."""
        return CalendarEvent(
            summary=collection.type,
            start=collection.date,
            end=collection.date + timedelta(days=1),
        )
+
+
def calc_unique_calendar_id(scraper: Scraper, type: str = None):
    """Build the unique entity id for a (scraper, waste type) calendar."""
    middle = "" if type is None else "_" + type
    return scraper.unique_id + middle + "_calendar"

+ 6 - 0
custom_components/waste_collection_schedule/const.py

@@ -0,0 +1,6 @@
"""Constants for the Waste Collection Schedule component."""

# Component domain, used to store component data in hass data.
DOMAIN = "waste_collection_schedule"

# Dispatcher signal sent after each fetch so sensor entities refresh their state.
UPDATE_SENSORS_SIGNAL = "wcs_update_sensors_signal"

+ 10 - 0
custom_components/waste_collection_schedule/manifest.json

@@ -0,0 +1,10 @@
+{
+  "domain": "waste_collection_schedule",
+  "name": "waste_collection_schedule",
+  "documentation": "https://github.com/mampfes/hacs_waste_collection_schedule#readme",
+  "requirements": ["icalendar", "recurring_ical_events", "icalevents", "bs4"],
+  "dependencies": [],
+  "codeowners": ["@mampfes"],
+  "iot_class": "cloud_polling",
+  "version": "1.27.0"
+}

+ 246 - 0
custom_components/waste_collection_schedule/sensor.py

@@ -0,0 +1,246 @@
+"""Sensor platform support for Waste Collection Schedule."""
+
+import datetime
+import logging
+from enum import Enum
+
+import homeassistant.helpers.config_validation as cv
+import voluptuous as vol
+from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
+from homeassistant.const import CONF_NAME, CONF_VALUE_TEMPLATE
+from homeassistant.core import callback
+from homeassistant.helpers.dispatcher import async_dispatcher_connect
+
+from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
+
+_LOGGER = logging.getLogger(__name__)
+
# Platform configuration keys (validated by PLATFORM_SCHEMA below).
CONF_SOURCE_INDEX = "source_index"
CONF_DETAILS_FORMAT = "details_format"
CONF_COUNT = "count"
CONF_LEADTIME = "leadtime"
CONF_DATE_TEMPLATE = "date_template"
CONF_COLLECTION_TYPES = "types"
CONF_ADD_DAYS_TO = "add_days_to"
+
+
class DetailsFormat(Enum):
    """Values for CONF_DETAILS_FORMAT: how extra state attributes are rendered."""

    upcoming = "upcoming"  # list of "<date> <type1, type2, ...>"
    appointment_types = "appointment_types"  # list of "<type> <date>"
    generic = "generic"  # all values in separate attributes
+
+
# Extend the default sensor platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_SOURCE_INDEX, default=0): cv.positive_int,
        vol.Optional(CONF_DETAILS_FORMAT, default="upcoming"): cv.enum(DetailsFormat),
        vol.Optional(CONF_COUNT): cv.positive_int,
        vol.Optional(CONF_LEADTIME): cv.positive_int,
        vol.Optional(CONF_COLLECTION_TYPES): cv.ensure_list,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_DATE_TEMPLATE): cv.template,
        vol.Optional(CONF_ADD_DAYS_TO, default=False): cv.boolean,
    }
)
+
+
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the waste collection schedule sensor from YAML configuration."""
    value_template = config.get(CONF_VALUE_TEMPLATE)
    date_template = config.get(CONF_DATE_TEMPLATE)

    # Attach hass to the optional templates so they can be rendered.
    for template in (value_template, date_template):
        if template is not None:
            template.hass = hass

    sensor = ScheduleSensor(
        hass=hass,
        api=hass.data[DOMAIN],
        name=config[CONF_NAME],
        source_index=config[CONF_SOURCE_INDEX],
        details_format=config[CONF_DETAILS_FORMAT],
        count=config.get(CONF_COUNT),
        leadtime=config.get(CONF_LEADTIME),
        collection_types=config.get(CONF_COLLECTION_TYPES),
        value_template=value_template,
        date_template=date_template,
        add_days_to=config.get(CONF_ADD_DAYS_TO),
    )

    async_add_entities([sensor])
+
+
class ScheduleSensor(SensorEntity):
    """Sensor showing the next waste collection(s) of one configured source."""

    def __init__(
        self,
        hass,
        api,
        name,
        source_index,
        details_format,
        count,
        leadtime,
        collection_types,
        value_template,
        date_template,
        add_days_to,
    ):
        """Initialize the entity.

        api -- the component's WasteCollectionApi stored in hass.data
        source_index -- index of the scraper within the api
        details_format -- DetailsFormat member controlling the attributes
        count / leadtime / collection_types -- filters for upcoming entries
        value_template / date_template -- optional render templates
        add_days_to -- expose a "daysTo" attribute if True
        """
        self._api = api
        self._source_index = source_index
        self._details_format = details_format
        self._count = count
        self._leadtime = leadtime
        self._collection_types = collection_types
        self._value_template = value_template
        self._date_template = date_template
        self._add_days_to = add_days_to

        # Current state value; None until the first update.
        self._value = None

        # entity attributes
        self._attr_name = name
        self._attr_unique_id = name
        self._attr_should_poll = False

        # Refresh the state whenever the API signals freshly fetched data.
        async_dispatcher_connect(hass, UPDATE_SENSORS_SIGNAL, self._update_sensor)

    @property
    def native_value(self):
        """Return the state of the entity."""
        return self._value

    async def async_added_to_hass(self):
        """Entities have been added to hass; render the initial state."""
        self._update_sensor()

    @property
    def _scraper(self):
        """Return the scraper selected by source_index (None if out of range)."""
        return self._api.get_scraper(self._source_index)

    @property
    def _separator(self):
        """Return separator string used to join waste types."""
        return self._api.separator

    @property
    def _include_today(self):
        """Return true if collections for today shall be included in the results."""
        # Use the public day_switch_time property instead of reaching into the
        # API's private _day_switch_time attribute.
        return datetime.datetime.now().time() < self._api.day_switch_time

    def _add_refreshtime(self):
        """Add refresh-time (= last fetch time) to device-state-attributes."""
        refreshtime = ""
        if self._scraper.refreshtime is not None:
            refreshtime = self._scraper.refreshtime.strftime("%x %X")
        self._attr_attribution = f"Last update: {refreshtime}"

    def _set_state(self, upcoming):
        """Set entity state with default format."""
        if len(upcoming) == 0:
            # Nothing upcoming: clear the state and fall back to a generic icon.
            self._value = None
            self._attr_icon = "mdi:trash-can"
            self._attr_entity_picture = None
            return

        collection = upcoming[0]
        # collection::=CollectionGroup{date=2020-04-01, types=['Type1', 'Type2']}

        if self._value_template is not None:
            self._value = self._value_template.async_render_with_possible_json_value(
                collection, None
            )
        else:
            self._value = (
                f"{self._separator.join(collection.types)} in {collection.daysTo} days"
            )

        self._attr_icon = collection.icon or "mdi:trash-can"
        self._attr_entity_picture = collection.picture

    def _render_date(self, collection):
        """Render a collection's date via the configured template (ISO default)."""
        if self._date_template is not None:
            return self._date_template.async_render_with_possible_json_value(
                collection, None
            )
        else:
            return collection.date.isoformat()

    @callback
    def _update_sensor(self):
        """Update the state and the device-state-attributes of the entity.

        Called if a new data has been fetched from the scraper source.
        """
        if self._scraper is None:
            _LOGGER.error(f"source_index {self._source_index} out of range")
            return None

        # State is always derived from the next collection day only.
        upcoming1 = self._scraper.get_upcoming_group_by_day(
            count=1, types=self._collection_types, include_today=self._include_today,
        )

        self._set_state(upcoming1)

        attributes = {}

        collection_types = (
            sorted(self._scraper.get_types())
            if self._collection_types is None
            else self._collection_types
        )

        if self._details_format == DetailsFormat.upcoming:
            # show upcoming events list in details
            upcoming = self._scraper.get_upcoming_group_by_day(
                count=self._count,
                leadtime=self._leadtime,
                types=self._collection_types,
                include_today=self._include_today,
            )
            for collection in upcoming:
                attributes[self._render_date(collection)] = self._separator.join(
                    collection.types
                )
        elif self._details_format == DetailsFormat.appointment_types:
            # show list of collections in details: next date per waste type
            for t in collection_types:
                collections = self._scraper.get_upcoming(
                    count=1, types=[t], include_today=self._include_today
                )
                date = (
                    "" if len(collections) == 0 else self._render_date(collections[0])
                )
                attributes[t] = date
        elif self._details_format == DetailsFormat.generic:
            # insert generic attributes into details
            attributes["types"] = collection_types
            attributes["upcoming"] = self._scraper.get_upcoming(
                count=self._count,
                leadtime=self._leadtime,
                types=self._collection_types,
                include_today=self._include_today,
            )
            refreshtime = ""
            if self._scraper.refreshtime is not None:
                refreshtime = self._scraper.refreshtime.isoformat(timespec="seconds")
            attributes["last_update"] = refreshtime

        if len(upcoming1) > 0:
            if self._add_days_to:
                attributes["daysTo"] = upcoming1[0].daysTo

        self._attr_extra_state_attributes = attributes
        self._add_refreshtime()

        if self.hass is not None:
            self.async_write_ha_state()

+ 2 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/__init__.py

@@ -0,0 +1,2 @@
+from .collection import Collection, CollectionBase, CollectionGroup  # type: ignore # isort:skip # noqa: F401
+from .scraper import Customize, Scraper  # noqa: F401

BIN
custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/__init__.cpython-310.pyc


BIN
custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/collection.cpython-310.pyc


BIN
custom_components/waste_collection_schedule/waste_collection_schedule/__pycache__/scraper.cpython-310.pyc


+ 71 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/collection.py

@@ -0,0 +1,71 @@
+import datetime
+
+
class CollectionBase(dict):  # inherit from dict to enable JSON serialization
    """Common base of collection entries.

    The dict part holds JSON-serializable values (the date as an ISO string);
    the original datetime.date object is kept privately for date arithmetic.
    """

    def __init__(self, date: datetime.date, icon: str = None, picture: str = None):
        super().__init__(date=date.isoformat(), icon=icon, picture=picture)
        # Keep the real date object as well; the dict only stores the string.
        self._date = date

    @property
    def date(self):
        """Return the collection date as a datetime.date object."""
        return self._date

    @property
    def daysTo(self):
        """Return the number of days from today until the collection date."""
        return (self._date - datetime.datetime.now().date()).days

    @property
    def icon(self):
        """Return the icon name, or None."""
        return self["icon"]

    def set_icon(self, icon: str):
        """Overwrite the icon name."""
        self["icon"] = icon

    @property
    def picture(self):
        """Return the picture URL, or None."""
        return self["picture"]

    def set_picture(self, picture: str):
        """Overwrite the picture URL."""
        self["picture"] = picture
+
+
class Collection(CollectionBase):
    """A single waste collection: one date, one waste type."""

    def __init__(
        self, date: datetime.date, t: str, icon: str = None, picture: str = None
    ):
        super().__init__(date=date, icon=icon, picture=picture)
        self["type"] = t

    @property
    def type(self):
        """Return the waste type."""
        return self["type"]

    def set_type(self, t: str):
        """Overwrite the waste type."""
        self["type"] = t

    def __repr__(self):
        return f"Collection{{date={self.date}, type={self.type}}}"
+
+
class CollectionGroup(CollectionBase):
    """All collections happening on the same date, merged into one entry."""

    def __init__(self, date: datetime.date):
        CollectionBase.__init__(self, date=date)

    @staticmethod
    def create(group):
        """Create from a non-empty list of Collection's sharing one date.

        A single-entry group inherits that entry's icon and picture; a
        multi-entry group gets a numbered box icon showing the entry count.
        """
        x = CollectionGroup(group[0].date)
        if len(group) == 1:
            x.set_icon(group[0].icon)
            x.set_picture(group[0].picture)
        else:
            x.set_icon(f"mdi:numeric-{len(group)}-box-multiple")
        # list comprehension instead of list(generator) (idiomatic, C400)
        x["types"] = [it.type for it in group]
        return x

    @property
    def types(self):
        """Return the list of waste types collected on this date."""
        return self["types"]

    def __repr__(self):
        return f"CollectionGroup{{date={self.date}, types={self.types}}}"

+ 305 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py

@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+
+import datetime
+import importlib
+import itertools
+import logging
+import traceback
+from typing import Dict, List, Optional
+
+from .collection import Collection, CollectionGroup
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Customize:
    """Per-waste-type display customization.

    Controls how one waste type is named, whether it is shown at all, which
    icon/picture it uses, and whether it gets its own calendar entity.
    """

    def __init__(
        self,
        waste_type,
        alias=None,
        show=True,
        icon=None,
        picture=None,
        use_dedicated_calendar=False,
        dedicated_calendar_title=None,
    ):
        self._waste_type = waste_type
        self._alias = alias
        self._show = show
        self._icon = icon
        self._picture = picture
        self._use_dedicated_calendar = use_dedicated_calendar
        self._dedicated_calendar_title = dedicated_calendar_title

    @property
    def waste_type(self):
        """Original waste type name as reported by the source."""
        return self._waste_type

    @property
    def alias(self):
        """Replacement name, or None to keep the original."""
        return self._alias

    @property
    def show(self):
        """Whether entries of this type are shown at all."""
        return self._show

    @property
    def icon(self):
        """Icon override, or None."""
        return self._icon

    @property
    def picture(self):
        """Picture override, or None."""
        return self._picture

    @property
    def use_dedicated_calendar(self):
        """Whether this type gets its own calendar entity."""
        return self._use_dedicated_calendar

    @property
    def dedicated_calendar_title(self):
        """Title for the dedicated calendar, or None for the default."""
        return self._dedicated_calendar_title

    def __repr__(self):
        return (
            f"Customize{{waste_type={self._waste_type}, alias={self._alias}, "
            f"show={self._show}, icon={self._icon}, picture={self._picture}}}"
        )
+
+
def filter_function(entry: Collection, customize: Dict[str, Customize]):
    """Return True if *entry* should be kept (i.e. not hidden by customization)."""
    rule = customize.get(entry.type)
    return True if rule is None else rule.show
+
+
def customize_function(entry: Collection, customize: Dict[str, Customize]):
    """Apply alias/icon/picture overrides from *customize* to *entry* in place."""
    rule = customize.get(entry.type)
    if rule is None:
        return entry
    if rule.alias is not None:
        entry.set_type(rule.alias)
    if rule.icon is not None:
        entry.set_icon(rule.icon)
    if rule.picture is not None:
        entry.set_picture(rule.picture)
    return entry
+
+
class Scraper:
    """Wraps one source module: fetches, filters and customizes its entries."""

    def __init__(
        self,
        source,
        customize: Dict[str, Customize],
        title: str,
        description: str,
        url: Optional[str],
        calendar_title: Optional[str],
        unique_id: str,
    ):
        """Initialize; prefer the Scraper.create() factory over calling this directly."""
        self._source = source
        self._customize = customize
        self._title = title
        self._description = description
        self._url = url
        self._calendar_title = calendar_title
        self._unique_id = unique_id
        self._refreshtime = None  # time of the last successful fetch
        self._entries: List[Collection] = []

    @property
    def source(self):
        return self._source

    @property
    def refreshtime(self):
        """Return the time of the last successful fetch, or None."""
        return self._refreshtime

    @property
    def title(self):
        return self._title

    @property
    def description(self):
        return self._description

    @property
    def url(self):
        return self._url

    @property
    def calendar_title(self):
        """Return the configured calendar title, falling back to the source title."""
        return self._calendar_title or self._title

    @property
    def unique_id(self):
        return self._unique_id

    def fetch(self):
        """Fetch data from source.

        Errors are logged; the previously fetched entries are kept in that case.
        """
        try:
            # fetch returns a list of Collection's
            entries = self._source.fetch()
        except Exception:
            _LOGGER.error(
                f"fetch failed for source {self._title}:\n{traceback.format_exc()}"
            )
            return
        self._refreshtime = datetime.datetime.now()

        # strip whitespaces
        for e in entries:
            e.set_type(e.type.strip())

        # filter hidden entries
        entries = filter(lambda x: filter_function(x, self._customize), entries)

        # customize fetched entries
        entries = map(lambda x: customize_function(x, self._customize), entries)

        self._entries = list(entries)

    def get_types(self):
        """Return set() of all collection types."""
        return {e.type for e in self._entries}

    def get_dedicated_calendar_types(self):
        """Return the shown types that want their own calendar, or None if none."""
        types = set()

        for key, customize in self._customize.items():
            if customize.show and customize.use_dedicated_calendar:
                types.add(key)

        return types or None

    def get_global_calendar_types(self):
        """Return the shown types belonging to the global calendar, or None if none."""
        types = set()

        for key, customize in self._customize.items():
            if customize.show and not customize.use_dedicated_calendar:
                types.add(key)

        return types or None

    def get_upcoming(self, count=None, leadtime=None, types=None, include_today=False):
        """Return list of upcoming entries, sorted by date.

        Keyword arguments:
        count -- maximum number of entries to return (None = no limit)
        leadtime -- maximum timespan in days (0 = today only, None = no limit)
        types -- restrict result to these waste types (None = all types)
        include_today -- also return collections happening today
        """
        return self._filter(
            self._entries,
            count=count,
            leadtime=leadtime,
            types=types,
            include_today=include_today,
        )

    def get_upcoming_group_by_day(
        self, count=None, leadtime=None, types=None, include_today=False
    ):
        """Return list of all entries, grouped by day, limited by count and/or leadtime."""
        entries = []

        # _filter returns entries sorted by date, which groupby requires.
        iterator = itertools.groupby(
            self._filter(
                self._entries,
                leadtime=leadtime,
                types=types,
                include_today=include_today,
            ),
            lambda e: e.date,
        )

        for _, group in iterator:
            entries.append(CollectionGroup.create(list(group)))
        if count is not None:
            entries = entries[:count]

        return entries

    def get_calendar_title_for_type(self, type):
        """Return the calendar title for a waste type with a dedicated calendar."""
        c = self._customize.get(type)
        if c is not None and c.dedicated_calendar_title:
            return c.dedicated_calendar_title

        return self.calendar_title

    def get_collection_type(self, type):
        """Return the (possibly aliased) display name of a waste type."""
        c = self._customize.get(type)
        if c is not None and c.alias:
            return c.alias

        return type

    def _filter(
        self, entries, count=None, leadtime=None, types=None, include_today=False
    ):
        """Return a sorted, filtered copy of *entries* (see get_upcoming for args)."""
        # remove unwanted waste types
        if types is not None:
            types_set = set(types)
            # BUGFIX: filter the entries passed in — the old code filtered
            # self._entries here and silently ignored its *entries* argument.
            entries = [e for e in entries if e.type in types_set]

        # remove expired entries
        now = datetime.datetime.now().date()
        if include_today:
            entries = [e for e in entries if e.date >= now]
        else:
            entries = [e for e in entries if e.date > now]

        # remove entries which are too far in the future (0 = today)
        if leadtime is not None:
            limit = now + datetime.timedelta(days=leadtime)
            entries = [e for e in entries if e.date <= limit]

        # ensure that entries are sorted by date
        entries.sort(key=lambda e: e.date)

        # remove surplus entries
        if count is not None:
            entries = entries[:count]

        return entries

    @staticmethod
    def create(
        source_name: str,
        customize: Dict[str, Customize],
        source_args,
        calendar_title: Optional[str] = None,
    ):
        """Load the source module, instantiate it and wrap it in a Scraper.

        Returns None if the source module cannot be imported.
        """
        # load source module

        # for home-assistant, use the last 3 folders, e.g. custom_component/wave_collection_schedule/waste_collection_schedule
        # otherwise, only use waste_collection_schedule
        try:
            source_module = importlib.import_module(
                f"waste_collection_schedule.source.{source_name}"
            )
        except ImportError:
            _LOGGER.error(f"source not found: {source_name}")
            return None

        # create source
        source = source_module.Source(**source_args)  # type: ignore

        # create scraper
        g = Scraper(
            source=source,
            customize=customize,
            title=source_module.TITLE,  # type: ignore[attr-defined]
            description=source_module.DESCRIPTION,  # type: ignore[attr-defined]
            url=source_module.URL,  # type: ignore[attr-defined]
            calendar_title=calendar_title,
            unique_id=calc_unique_scraper_id(source_name, source_args),
        )

        return g
+
+
def calc_unique_scraper_id(source_name, source_args):
    """Derive a stable unique id from the source name and its sorted arguments."""
    args_part = str(sorted(source_args.items()))
    return f"{source_name}{args_part}"

+ 159 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py

@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+
+import datetime
+import json
+
+import requests
+
# Supported regioit.de service domains: subdomain -> human-readable region name.
SERVICE_DOMAINS = {
    "aachen": "Aachen",
    "aw-bgl2": "Bergisch Gladbach",
    "bav": "Bergischer Abfallwirtschaftverbund",
    "coe": "Kreis Coesfeld",
    "cottbus": "Cottbus",
    "din": "Dinslaken",
    "dorsten": "Dorsten",
    "gt2": "Gütersloh",
    "hlv": "Halver",
    "krhs": "Kreis Heinsberg",
    "krwaf": "Kreis Warendorf",
    "lindlar": "Lindlar",
    "nds": "Norderstedt",
    "nuernberg": "Nürnberg",
    "oberhausen": "Oberhausen",
    "pi": "Kreis Pinneberg",
    "roe": "Roetgen",
    "solingen": "Solingen",
    "stl": "Lüdenscheid",
    "straelen": "Straelen",
    "viersen": "Kreis Viersen",
    "wml2": "EGW Westmünsterland",
    "zew2": "AWA Entsorgungs GmbH",
}
+
+
class AbfallnaviDe:
    """Client for the regioit.de "Abfall-App" REST service.

    NOTE(review): all HTTP requests are made without a timeout — confirm
    callers can tolerate an indefinitely hanging request.
    """

    def __init__(self, service_domain):
        """service_domain -- key from SERVICE_DOMAINS selecting the region."""
        self._service_domain = service_domain
        self._service_url = f"https://{service_domain}-abfallapp.regioit.de/abfall-app-{service_domain}/rest"

    def _fetch(self, path, params=None):
        """GET *path* relative to the service url and return the raw text."""
        r = requests.get(f"{self._service_url}/{path}", params=params)
        r.encoding = "utf-8"  # requests doesn't guess the encoding correctly
        return r.text

    def _fetch_json(self, path, params=None):
        """GET *path* and decode the JSON response."""
        return json.loads(self._fetch(path, params=params))

    def get_cities(self):
        """Return all cities of service domain as {id: name}."""
        cities = self._fetch_json("orte")
        result = {}
        for city in cities:
            result[city["id"]] = city["name"]
        return result

    def get_city_id(self, city):
        """Return id for given city string, or None if not found."""
        cities = self.get_cities()
        return self._find_in_inverted_dict(cities, city)

    def get_streets(self, city_id):
        """Return all streets of a city as {id: name}."""
        streets = self._fetch_json(f"orte/{city_id}/strassen")
        result = {}
        for street in streets:
            result[street["id"]] = street["name"]
        return result

    def get_street_id(self, city_id, street):
        """Return id for given street string, or None if not found."""
        streets = self.get_streets(city_id)
        return self._find_in_inverted_dict(streets, street)

    def get_house_numbers(self, street_id):
        """Return all house numbers of a street as {id: number}."""
        house_numbers = self._fetch_json(f"strassen/{street_id}")
        result = {}
        # example: {"id":5985445,"name":"Adalbert-Stifter-Straße",
        #           "hausNrList":[{"id":5985446,"nr":"1"}, ...]}
        # BUGFIX: hausNrList is a list, so the fallback default is [] not {}.
        for hausNr in house_numbers.get("hausNrList", []):
            result[hausNr["id"]] = hausNr["nr"]
        return result

    def get_house_number_id(self, street_id, house_number):
        """Return id for given house number string, or None if not found."""
        house_numbers = self.get_house_numbers(street_id)
        return self._find_in_inverted_dict(house_numbers, house_number)

    def get_waste_types(self):
        """Return all waste types ("Fraktionen") as {id: name}."""
        waste_types = self._fetch_json("fraktionen")
        result = {}
        for waste_type in waste_types:
            result[waste_type["id"]] = waste_type["name"]
        return result

    def _get_dates(self, target, area_id, waste_types=None):
        """Retrieve [date, waste_type] pairs for a street or house number.

        target -- REST collection name: "strassen" or "hausnummern"
        area_id -- id within that collection (renamed from `id`, which
                   shadowed the builtin)
        waste_types -- {id: name} map; fetched from the service if None
        """
        args = []

        if waste_types is None:
            waste_types = self.get_waste_types()

        # request every known waste type explicitly
        for f in waste_types.keys():
            args.append(("fraktion", f))

        results = self._fetch_json(f"{target}/{area_id}/termine", params=args)

        entries = []
        for r in results:
            date = datetime.datetime.strptime(r["datum"], "%Y-%m-%d").date()
            fraktion = waste_types[r["bezirk"]["fraktionId"]]
            entries.append([date, fraktion])
        return entries

    def get_dates_by_street_id(self, street_id):
        """Return [date, waste_type] pairs for a whole street."""
        return self._get_dates("strassen", street_id, waste_types=None)

    def get_dates_by_house_number_id(self, house_number_id):
        """Return [date, waste_type] pairs for a single house number."""
        return self._get_dates("hausnummern", house_number_id, waste_types=None)

    def get_dates(self, city, street, house_number=None):
        """Get dates by strings only for convenience.

        Raises Exception if the city or the street cannot be resolved.
        """
        # find city_id
        city_id = self.get_city_id(city)
        if city_id is None:
            raise Exception(f"No id found for city: {city}")

        # find street_id
        street_id = self.get_street_id(city_id, street)
        if street_id is None:
            raise Exception(f"No id found for street: {street}")

        # find house_number_id (which is optional: not all house numbers have an id)
        house_number_id = self.get_house_number_id(street_id, house_number)

        # prefer the more specific house-number schedule if one exists
        if house_number_id is not None:
            return self.get_dates_by_house_number_id(house_number_id)
        else:
            return self.get_dates_by_street_id(street_id)

    def _find_in_inverted_dict(self, mydict, value):
        """Return the key mapped to *value* (last match wins), or None."""
        inverted_dict = dict(map(reversed, mydict.items()))
        return inverted_dict.get(value)
+
+
def main():
    """Demo: print collection dates for a few known regions (requires network)."""
    aachen = AbfallnaviDe("aachen")
    print(aachen.get_dates("Aachen", "Abteiplatz", "7"))

    lindlar = AbfallnaviDe("lindlar")
    print(lindlar.get_dates("Lindlar", "Aggerweg"))

    roe = AbfallnaviDe("roe")
    print(roe.get_dates("Roetgen", "Am Sportplatz", "2"))


if __name__ == "__main__":
    main()

+ 69 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/service/EcoHarmonogramPL.py

@@ -0,0 +1,69 @@
+import sys
+
+import requests
+
# ecoharmonogram.pl REST API endpoints.
towns_url = "https://ecoharmonogram.pl/api/api.php?action=getTowns"
scheduled_periods_url = "https://ecoharmonogram.pl/api/api.php?action=getSchedulePeriods"
streets_url = "https://ecoharmonogram.pl/api/api.php?action=getStreets"
schedules_url = "https://ecoharmonogram.pl/api/api.php?action=getSchedules"

# Common request headers; the API answers with JSON (UTF-8, with BOM).
headers = {
    'Content-Type': 'application/json; charset=utf-8',
    'Accept': 'application/json',
}
+
+
class Ecoharmonogram:
    """Thin client for the ecoharmonogram.pl REST API.

    NOTE(review): all HTTP requests are made without a timeout — confirm
    callers can tolerate a hanging request.
    """

    @staticmethod
    def fetch_schedules(sp, street):
        """Fetch the schedules for a street within a schedule period."""
        payload = {'streetId': street.get("id"), 'schedulePeriodId': sp.get("id")}
        schedules_response = requests.get(
            schedules_url,
            headers=headers, params=payload)
        # The API responds with a UTF-8 BOM; utf-8-sig strips it.
        schedules_response.encoding = "utf-8-sig"
        return schedules_response.json()

    @staticmethod
    def fetch_streets(sp, town, street, house_number):
        """Fetch the streets matching name/number for a town and schedule period."""
        payload = {'streetName': str(street), 'number': str(house_number), 'townId': town.get("id"),
                   'schedulePeriodId': sp.get("id")}

        streets_response = requests.get(
            streets_url, headers=headers, params=payload)
        streets_response.encoding = "utf-8-sig"
        return streets_response.json().get("streets")

    @staticmethod
    def fetch_scheduled_periods(town):
        """Fetch the schedule periods available for a town."""
        payload = {'townId': town.get("id")}
        # local variable renamed: was misspelled "scheduled_perionds_response"
        schedule_periods_response = requests.get(scheduled_periods_url, headers=headers, params=payload)
        schedule_periods_response.encoding = "utf-8-sig"
        return schedule_periods_response.json()

    @staticmethod
    def fetch_town():
        """Fetch the list of all known towns."""
        town_response = requests.get(towns_url, headers=headers)
        town_response.encoding = "utf-8-sig"
        return town_response.json()

    @staticmethod
    def print_possible_sides(town_input, street_input, house_number_input):
        """Print the street "sides" matching the given town/street/number.

        Raises IndexError if no town name contains *town_input*.
        """
        town_data = Ecoharmonogram.fetch_town()
        matching_towns = filter(lambda x: town_input.lower() in x.get('name').lower(), town_data.get('towns'))
        town = list(matching_towns)[0]

        schedule_periods_data = Ecoharmonogram.fetch_scheduled_periods(town)
        schedule_periods = schedule_periods_data.get("schedulePeriods")

        for sp in schedule_periods:
            streets = Ecoharmonogram.fetch_streets(sp, town, street_input, house_number_input)
            for street in streets:
                print(street.get("sides"))
+
+
if __name__ == '__main__':
    # BUGFIX: `sys.argv[2] or ""` raises IndexError before `or ""` can apply
    # when fewer than three arguments are given; check the length instead.
    town = sys.argv[1]
    street = sys.argv[2] if len(sys.argv) > 2 else ""
    number = sys.argv[3] if len(sys.argv) > 3 else ""
    Ecoharmonogram.print_possible_sides(town, street, number)

+ 71 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py

@@ -0,0 +1,71 @@
+import datetime
+import logging
+import re
+from typing import Any, List, Optional, Tuple
+
+from icalevents import icalevents
+
+_LOGGER = logging.getLogger(__name__)
+
+
class ICS:
    """Convert ICS calendar data into (date, waste-type) tuples.

    offset shifts each collection date by the given number of days,
    regex replaces the event summary with group(1) of a match, and
    split_at splits one summary into several title-cased waste types.
    """

    def __init__(
        self,
        offset: Optional[int] = None,
        regex: Optional[str] = None,
        split_at: Optional[str] = None,
    ):
        self._offset = offset
        self._regex = re.compile(regex) if regex is not None else None
        self._split_at = re.compile(split_at) if split_at is not None else None

    def convert(self, ics_data: str) -> List[Tuple[datetime.date, str]]:
        # Window for recurring events: today (minus offset) .. one year ahead.
        window_start = datetime.datetime.now().replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        if self._offset is not None:
            window_start -= datetime.timedelta(days=self._offset)
        window_end = window_start.replace(year=window_start.year + 1)

        # parse ics data
        events: List[Any] = icalevents.events(
            start=window_start, end=window_end, string_content=ics_data.encode()
        )

        entries: List[Tuple[datetime.date, str]] = []

        for event in events:
            collection_date: Optional[datetime.date] = None
            if isinstance(event.start, datetime.datetime):
                collection_date = event.start.date()
            elif isinstance(event.start, datetime.date):
                collection_date = event.start

            # Skip events without a usable start date.
            if collection_date is None:
                continue

            if self._offset is not None:
                collection_date += datetime.timedelta(days=self._offset)

            summary = str(event.summary)

            if self._regex is not None:
                match = self._regex.match(summary)
                if match:
                    summary = match.group(1)

            if self._split_at is None:
                entries.append((collection_date, summary))
            else:
                for part in re.split(self._split_at, summary):
                    entries.append((collection_date, part.strip().title()))

        return entries

+ 64 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS_v1.py

@@ -0,0 +1,64 @@
+import datetime
+import logging
+import re
+
+import icalendar
+import recurring_ical_events
+
+_LOGGER = logging.getLogger(__name__)
+
+
class ICS_v1:
    """Convert ICS data to (date, waste-type) tuples via icalendar/recurring_ical_events.

    offset shifts each collection date by the given number of days,
    regex replaces the event summary with group(1) of a match, and
    split_at splits one summary into several title-cased waste types.
    """

    def __init__(self, offset=None, regex=None, split_at=None):
        self._offset = offset
        self._regex = re.compile(regex) if regex is not None else None
        # split_at is passed uncompiled to re.split in convert().
        self._split_at = split_at

    def convert(self, ics_data):
        """Parse *ics_data*; return [] (and log) when parsing fails."""
        try:
            calendar = icalendar.Calendar.from_ical(ics_data)
        except Exception as err:
            _LOGGER.error(f"Parsing ics data failed:{str(err)}")
            _LOGGER.debug(ics_data)
            return []

        # Window for recurring events: today (minus offset) .. one year ahead.
        start_date = datetime.datetime.now().replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        if self._offset is not None:
            start_date -= datetime.timedelta(days=self._offset)
        end_date = start_date.replace(year=start_date.year + 1)

        events = recurring_ical_events.of(calendar).between(start_date, end_date)

        entries = []
        for e in events:
            if e.name != "VEVENT":
                continue

            # isinstance instead of type()==: datetime must be tested first
            # because datetime.datetime is a subclass of datetime.date.
            dt = e.get("dtstart").dt
            if isinstance(dt, datetime.datetime):
                dtstart = dt.date()
            elif isinstance(dt, datetime.date):
                dtstart = dt
            else:
                # Skip events without a usable start date; previously dtstart
                # stayed None and could crash on the offset addition below.
                continue

            if self._offset is not None:
                dtstart += datetime.timedelta(days=self._offset)

            # calculate waste type
            summary = str(e.get("summary"))
            if self._regex is not None:
                match = self._regex.match(summary)
                if match:
                    summary = match.group(1)

            if self._split_at is not None:
                for t in re.split(self._split_at, summary):
                    entries.append((dtstart, t.strip().title()))
            else:
                entries.append((dtstart, summary))

        return entries

+ 0 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/service/__init__.py


BIN
custom_components/waste_collection_schedule/waste_collection_schedule/source/__pycache__/awido_de.cpython-310.pyc


+ 181 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/a_region_ch.py

@@ -0,0 +1,181 @@
+import datetime
+from urllib.parse import parse_qs, urlparse
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "A-Region"
+DESCRIPTION = "Source for A-Region, Switzerland waste collection."
+URL = "https://www.a-region.ch"
+TEST_CASES = {
+    "Andwil": {"municipality": "Andwil"},
+    "Rorschach": {"municipality": "Rorschach", "district": "Unteres Stadtgebiet"},
+    "Wolfhalden": {"municipality": "Wolfhalden"},
+}
+
+BASE_URL = "https://www.a-region.ch"
+
+MUNICIPALITIES = {
+    "Andwil": "/index.php?ref=search&refid=13875680&apid=5011362",
+    "Appenzell": "/index.php?ref=search&refid=13875680&apid=7502696",
+    "Berg": "/index.php?ref=search&refid=13875680&apid=3106981",
+    "Bühler": "/index.php?ref=search&refid=13875680&apid=4946039",
+    "Eggersriet": "/index.php?ref=search&refid=13875680&apid=7419807",
+    "Gais": "/index.php?ref=search&refid=13875680&apid=7001813",
+    "Gaiserwald": "/index.php?ref=search&refid=13875680&apid=9663627",
+    "Goldach": "/index.php?ref=search&refid=13875680&apid=1577133",
+    "Grub": "/index.php?ref=search&refid=13875680&apid=10619556",
+    "Heiden": "/index.php?ref=search&refid=13875680&apid=13056683",
+    "Herisau": "/index.php?ref=search&refid=13875680&apid=10697513",
+    "Horn": "/index.php?ref=search&refid=13875680&apid=7102181",
+    "Hundwil": "/index.php?ref=search&refid=13875680&apid=7705668",
+    "Häggenschwil": "/index.php?ref=search&refid=13875680&apid=1590277",
+    "Lutzenberg": "/index.php?ref=search&refid=13875680&apid=301262",
+    "Muolen": "/index.php?ref=search&refid=13875680&apid=9000564",
+    "Mörschwil": "/index.php?ref=search&refid=13875680&apid=12765590",
+    "Rehetobel": "/index.php?ref=search&refid=13875680&apid=15824437",
+    "Rorschach": "/index.php?ref=search&refid=13875680&apid=7773833",
+    "Rorschacherberg": "/index.php?ref=search&refid=13875680&apid=13565317",
+    "Schwellbrunn": "/index.php?ref=search&refid=13875680&apid=10718116",
+    "Schönengrund": "/index.php?ref=search&refid=13875680&apid=8373248",
+    "Speicher": "/index.php?ref=search&refid=13875680&apid=11899879",
+    "Stein": "/index.php?ref=search&refid=13875680&apid=9964399",
+    "Steinach": "/index.php?ref=search&refid=13875680&apid=16358152",
+    "Teufen": "/index.php?ref=search&refid=13875680&apid=662596",
+    "Thal": "/index.php?ref=search&refid=13875680&apid=5087375",
+    "Trogen": "/index.php?ref=search&refid=13875680&apid=14835149",
+    "Tübach": "/index.php?ref=search&refid=13875680&apid=6762782",
+    "Untereggen": "/index.php?ref=search&refid=13875680&apid=5661056",
+    "Urnäsch": "/index.php?ref=search&refid=13875680&apid=1891722",
+    "Wald": "/index.php?ref=search&refid=13875680&apid=4214292",
+    "Waldkirch": "/index.php?ref=search&refid=13875680&apid=15180335",
+    "Waldstatt": "/index.php?ref=search&refid=13875680&apid=15561367",
+    "Wittenbach": "/index.php?ref=search&refid=13875680&apid=13277954",
+    "Wolfhalden": "/index.php?ref=search&refid=13875680&apid=5642491",
+}
+
+
class Source:
    """Waste collection source for the A-Region (St. Gallen / Appenzell) portal."""

    def __init__(self, municipality, district=None):
        # district is only required for municipalities that are split into
        # several collection districts (e.g. Rorschach).
        self._municipality = municipality
        self._district = district

    def fetch(self):
        """Return all Collection entries for the configured municipality."""
        # municipalities = self.get_municipalities()
        municipalities = MUNICIPALITIES
        if self._municipality not in municipalities:
            raise Exception(f"municipality '{self._municipality}' not found")

        waste_types = self.get_waste_types(municipalities[self._municipality])

        entries = []

        for (waste_type, link) in waste_types.items():
            dates = self.get_dates(link)

            for d in dates:
                entries.append(Collection(d, waste_type))

        return entries

    def get_municipalities(self):
        # Scrape the municipality -> link map from the site's paged search.
        # Currently unused in fetch() (the static MUNICIPALITIES table is used
        # instead) because the AJAX paging below still relies on a hard-coded
        # session cookie -- TODO confirm before enabling.
        municipalities = {}

        # get PHPSESSID
        session = requests.session()
        r = session.get(f"{BASE_URL}")
        r.raise_for_status()

        # cookies = {'PHPSESSID': requests.utils.dict_from_cookiejar(r.cookies)['PHPSESSID']}

        params = {"apid": "13875680", "apparentid": "4618613"}
        r = session.get(f"{BASE_URL}/index.php", params=params)
        r.raise_for_status()
        self.extract_municipalities(r.text, municipalities)

        # Fetch further result pages until the server returns an empty body.
        page = 1
        while True:
            params = {
                "do": "searchFetchMore",
                "hash": "606ee79ca61fc6eef434ab4fca0d5956",
                "p": page,
            }
            headers = {
                "cookie": "PHPSESSID=71v67j0et4ih04qa142d402ebm;"
            }  # TODO: get cookie from first request
            r = session.get(
                f"{BASE_URL}/appl/ajax/index.php", params=params, headers=headers
            )
            r.raise_for_status()
            if r.text == "":
                break
            self.extract_municipalities(r.text, municipalities)
            page = page + 1
        return municipalities

    def extract_municipalities(self, text, municipalities):
        # Parse search-result HTML and add municipality-name -> href entries
        # to the supplied dict (mutated in place).
        soup = BeautifulSoup(text, features="html.parser")
        downloads = soup.find_all("a", href=True)
        for download in downloads:
            # href ::= "/index.hp"
            href = download.get("href")
            if "ref=search" in href:
                for title in download.find_all("div", class_="title"):
                    # title ::= "Abfallkalender Andwil"
                    municipalities[title.string.removeprefix("Abfallkalender ")] = href

    def get_waste_types(self, link):
        # Return a dict of waste-type name -> detail-page link for one
        # municipality page.
        r = requests.get(f"{BASE_URL}{link}")
        r.raise_for_status()

        waste_types = {}

        soup = BeautifulSoup(r.text, features="html.parser")
        downloads = soup.find_all("a", href=True)
        for download in downloads:
            # href ::= "/index.php?apid=12731252&amp;apparentid=5011362"
            href = download.get("href")
            if "apparentid" in href:
                for title in download.find_all("div", class_="title"):
                    # title ::= "Altmetall"
                    waste_types[title.string] = href

        return waste_types

    def get_dates(self, link):
        # Return the set of collection dates found on a waste-type page.
        # Recurses one level when the page lists per-district sub-pages.
        r = requests.get(f"{BASE_URL}{link}")
        r.raise_for_status()

        soup = BeautifulSoup(r.text, features="html.parser")

        # check for additional districts
        districts = {}
        downloads = soup.find_all("a", href=True)
        for download in downloads:
            href = download.get("href")
            if "apparentid" in href:
                title = download.find("div", class_="title")
                if title is not None:
                    # additional district found ->
                    districts[title.string.split(": ")[1]] = href
        if len(districts) > 0:
            if self._district is None:
                raise Exception("district is missing")
            if self._district not in districts:
                raise Exception(f"district '{self._district}' not found")
            return self.get_dates(districts[self._district])

        dates = set()

        downloads = soup.find_all("a", href=True)
        for download in downloads:
            # href ::= "/appl/ics.php?apid=12731252&amp;from=2022-05-04%2013%3A00%3A00&amp;to=2022-05-04%2013%3A00%3A00"
            href = download.get("href")
            if "ics.php" in href:
                parsed = urlparse(href)
                query = parse_qs(parsed.query)
                # Each ICS link carries the collection date in its "from" query arg.
                date = datetime.datetime.strptime(query["from"][0], "%Y-%m-%d %H:%M:%S")
                dates.add(date.date())

        return dates

+ 140 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_io.py

@@ -0,0 +1,140 @@
+import datetime
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AbfallPlus"
+DESCRIPTION = (
+    "Source for AbfallPlus.de waste collection. Service is hosted on abfall.io."
+)
+URL = "https://www.abfallplus.de"
+TEST_CASES = {
+    "Waldenbuch": {
+        "key": "8215c62763967916979e0e8566b6172e",
+        "f_id_kommune": 2999,
+        "f_id_strasse": 1087,
+        # "f_abfallarten": [50, 53, 31, 299, 328, 325]
+    },
+    "Landshut": {
+        "key": "bd0c2d0177a0849a905cded5cb734a6f",
+        "f_id_kommune": 2655,
+        "f_id_bezirk": 2655,
+        "f_id_strasse": 763,
+        # "f_abfallarten": [31, 17, 19, 218]
+    },
+    "Schoenmackers": {
+        "key": "e5543a3e190cb8d91c645660ad60965f",
+        "f_id_kommune": 3682,
+        "f_id_strasse": "3682adenauerplatz",
+        "f_id_strasse_hnr": "20417",
+        # "f_abfallarten": [691,692,696,695,694,701,700,693,703,704,697,699],
+    },
+    "Freudenstadt": {
+        "key": "595f903540a36fe8610ec39aa3a06f6a",
+        "f_id_kommune": 3447,
+        "f_id_bezirk": 22017,
+        "f_id_strasse": 22155,
+    },
+    "Ludwigshafen am Rhein": {
+        "key": "6efba91e69a5b454ac0ae3497978fe1d",
+        "f_id_kommune": "5916",
+        "f_id_strasse": "5916abteistrasse",
+        "f_id_strasse_hnr": 33,
+    },
+    "Traunstein": {
+        "key": "279cc5db4db838d1cfbf42f6f0176a90",
+        "f_id_kommune": "2911",
+        "f_id_strasse": "2374",
+    },
+}
+
+MODUS_KEY = "d6c5855a62cf32a4dadbc2831f0f295f"
+HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
+
+
+# Parser for HTML input (hidden) text
+class HiddenInputParser(HTMLParser):
+    def __init__(self):
+        super().__init__()
+        self._args = {}
+
+    @property
+    def args(self):
+        return self._args
+
+    def handle_starttag(self, tag, attrs):
+        if tag == "input":
+            d = dict(attrs)
+            if d["type"] == "hidden":
+                self._args[d["name"]] = d["value"]
+
+
class Source:
    """Source for abfall.io / AbfallPlus: downloads the ICS export and converts it."""

    def __init__(
        self,
        key,
        f_id_kommune,
        f_id_strasse,
        f_id_bezirk=None,
        f_id_strasse_hnr=None,
        f_abfallarten=None,
    ):
        self._key = key
        self._kommune = f_id_kommune
        self._bezirk = f_id_bezirk
        self._strasse = f_id_strasse
        self._strasse_hnr = f_id_strasse_hnr
        # Avoid the mutable default argument ([] was shared across instances);
        # None means "all waste types".
        self._abfallarten = [] if f_abfallarten is None else f_abfallarten
        self._ics = ICS()

    def fetch(self):
        """Request one year of collection data from api.abfall.io as ICS."""
        # get token
        params = {"key": self._key, "modus": MODUS_KEY, "waction": "init"}

        r = requests.post("https://api.abfall.io", params=params, headers=HEADERS)

        # add all hidden input fields to form data
        # There is one hidden field which acts as a token:
        # It consists of a UUID key and a UUID value.
        p = HiddenInputParser()
        p.feed(r.text)
        args = p.args

        args["f_id_kommune"] = self._kommune
        args["f_id_strasse"] = self._strasse

        if self._bezirk is not None:
            args["f_id_bezirk"] = self._bezirk

        if self._strasse_hnr is not None:
            args["f_id_strasse_hnr"] = self._strasse_hnr

        for i, art in enumerate(self._abfallarten):
            args[f"f_id_abfalltyp_{i}"] = art

        args["f_abfallarten_index_max"] = len(self._abfallarten)
        args["f_abfallarten"] = ",".join(map(str, self._abfallarten))

        # request a one-year window starting today
        now = datetime.datetime.now()
        date2 = now.replace(year=now.year + 1)
        args["f_zeitraum"] = f"{now.strftime('%Y%m%d')}-{date2.strftime('%Y%m%d')}"

        params = {"key": self._key, "modus": MODUS_KEY, "waction": "export_ics"}

        # get csv file
        r = requests.post(
            "https://api.abfall.io", params=params, data=args, headers=HEADERS
        )

        # parse ics file
        r.encoding = "utf-8"  # requests doesn't guess the encoding correctly
        ics_file = r.text

        dates = self._ics.convert(ics_file)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries

+ 100 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_zollernalbkreis_de.py

@@ -0,0 +1,100 @@
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfall Zollernalbkreis"
+DESCRIPTION = "Source for Abfallwirtschaft Zollernalbkreis waste collection."
+URL = "https://www.abfallkalender-zak.de"
+TEST_CASES = {
+    "Ebingen": {
+        "city": "2,3,4",
+        "street": "3",
+        "types": [
+            "restmuell",
+            "gelbersack",
+            "papiertonne",
+            "biomuell",
+            "gruenabfall",
+            "schadstoffsammlung",
+            "altpapiersammlung",
+            "schrottsammlung",
+            "weihnachtsbaeume",
+            "elektrosammlung",
+        ],
+    },
+    "Erlaheim": {
+        "city": "79",
+        "street": "",
+        "types": [
+            "restmuell",
+            "gelbersack",
+            "papiertonne",
+            "biomuell",
+            "gruenabfall",
+            "schadstoffsammlung",
+            "altpapiersammlung",
+            "schrottsammlung",
+            "weihnachtsbaeume",
+            "elektrosammlung",
+        ],
+    },
+}
+
+
class Source:
    """Source for the Zollernalbkreis (ZAK) waste calendar ICS download."""

    def __init__(self, city, types, street=None):
        self._city = city
        self._street = street
        self._types = types
        self._ics = ICS()
        # Map waste-type names to Material Design icons.
        self._iconMap = {
            "Restmüll": "mdi:trash-can",
            "Grünabfall": "mdi:leaf",
            "Gelber Sack": "mdi:sack",
            "Papiertonne": "mdi:package-variant",
            "Bildschirm-/Kühlgeräte": "mdi:television-classic",
            "Schadstoffsammlung": "mdi:biohazard",
            "altmetalle": "mdi:nail",
        }

    def fetch(self):
        """Fetch the current year, plus next year when already in December."""
        today = datetime.now()
        entries = self.fetch_year(today.year, self._city, self._street, self._types)
        if today.month == 12:
            # also get data for next year if we are already in december
            try:
                entries.extend(
                    self.fetch_year(today.year + 1, self._city, self._street, self._types)
                )
            except Exception:
                # ignore if fetch for next year fails
                pass
        return entries

    def fetch_year(self, year, city, street, types):
        """Download and convert the ICS calendar for one year."""
        params = {
            "city": city,
            "street": street,
            "year": year,
            "types[]": types,
            "go_ics": "Download",
        }

        # get ics file
        r = requests.get("https://www.abfallkalender-zak.de", params=params)

        # parse ics file
        dates = self._ics.convert(r.text)

        return [
            Collection(
                date=pickup_date,
                t=waste_type,
                icon=self._iconMap.get(waste_type, "mdi:trash-can"),
            )
            for pickup_date, waste_type in dates
        ]

+ 43 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallnavi_de.py

@@ -0,0 +1,43 @@
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.AbfallnaviDe import AbfallnaviDe
+
+TITLE = "AbfallNavi"
+DESCRIPTION = (
+    "Source for AbfallNavi waste collection. AbfallNavi is a brand name of regioit.de."
+)
+URL = "https://www.regioit.de"
+TEST_CASES = {
+    "Aachen, Abteiplatz 7": {
+        "service": "aachen",
+        "ort": "Aachen",
+        "strasse": "Abteiplatz",
+        "hausnummer": "7",
+    },
+    "Lindlar, Aggerweg": {
+        "service": "lindlar",
+        "ort": "Lindlar",
+        "strasse": "Aggerweg",
+    },
+    "Roetgen, Am Sportplatz 2": {
+        "service": "roe",
+        "ort": "Roetgen",
+        "strasse": "Am Sportplatz",
+        "hausnummer": "2",
+    },
+}
+
+
class Source:
    """Adapter mapping AbfallnaviDe service results to Collection objects."""

    def __init__(self, service, ort, strasse, hausnummer=None):
        self._api = AbfallnaviDe(service)
        self._ort = ort
        self._strasse = strasse
        self._hausnummer = hausnummer

    def fetch(self):
        """Return one Collection per (date, waste type) reported by the API."""
        dates = self._api.get_dates(self._ort, self._strasse, self._hausnummer)
        return [Collection(date, waste_type) for date, waste_type in dates]

+ 107 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/alw_wf_de.py

@@ -0,0 +1,107 @@
+import datetime
+import json
+
+import pytz
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "ALW Wolfenbüttel"
+DESCRIPTION = "Source for ALW Wolfenbüttel."
+URL = "https://abfallapp.alw-wf.de"
+TEST_CASES = {
+    "Linden alte Straße": {"ort": "Linden mit Okertalsiedlung", "strasse": "Siedlung"},
+    "Linden neuere Straße": {
+        "ort": "Linden mit Okertalsiedlung",
+        "strasse": "Kleingartenweg",
+    },
+    "Dettum": {"ort": "Dettum", "strasse": "Egal!"},
+}
+
+AUTH_DATA = {
+    "auth": {
+        "Name": "ALW",
+        "Version": "2.0",
+        "AuthKey": "ALW",
+        "DeviceID": "ALW",
+        "Username": "ALW",
+        "Password": "ALW",
+    },
+    "request": {},
+}
+ALL_STREETS = "Alle Straßen"
+BIN_TYPE_NORMAL = "0"
+
+
class Source:
    """Source for ALW Wolfenbüttel's JSON endpoints."""

    def __init__(self, ort, strasse):
        self._ort = ort
        self._strasse = strasse

    @staticmethod
    def _request(endpoint, what):
        """POST the static auth payload to *endpoint* and return its result list.

        Factors out the request/decode/status-check boilerplate that was
        repeated for every endpoint. Raises an Exception with the server's
        status message when the embedded status code is not 200.
        """
        auth_params = json.dumps(AUTH_DATA)
        # ALW WF uses a self-signed certificate so we need to disable certificate verification
        r = requests.post(f"{URL}/{endpoint}", data=auth_params, verify=False)
        result = r.json()["result"][0]
        if result["StatusCode"] != 200:
            raise Exception(f"Error getting {what}: {result['StatusMsg']}")
        return result["result"]

    def fetch(self):
        """Resolve Ort and Straße IDs, then decode the bitmask-encoded Termine."""
        # Resolve the Ort (municipality) name to its ID.
        orte = {i["Name"]: i["ID"] for i in self._request("GetOrte.php", "Orte")}
        ort_id = orte.get(self._ort, None)

        if ort_id is None:
            raise Exception(f"Error finding Ort {self._ort}")

        # Resolve the street: either an exact name match or the catch-all
        # "Alle Straßen" entry for this Ort.
        strasse_id = None
        for strasse in self._request("GetStrassen.php", "Straßen"):
            if strasse["OrtID"] != ort_id:
                continue
            if strasse["Name"] == ALL_STREETS or strasse["Name"] == self._strasse:
                strasse_id = strasse["ID"]
                break

        if strasse_id is None:
            raise Exception(f"Error finding Straße {self._strasse}")

        # Waste types carry a bit value ("Wertigkeit") used as a bitmask below.
        arten = self._request("GetArten.php", "Arten")
        arten = {
            int(i["Wertigkeit"]): i["Name"]
            for i in arten
            if i["Art"] == BIN_TYPE_NORMAL
        }

        entries = []
        for termin in self._request(f"GetTermine.php/{strasse_id}", "Termine"):
            ts = int(termin["DatumLong"]) / 1000
            # Timestamps are unix with milliseconds but not UTC...
            date = datetime.datetime.fromtimestamp(
                ts, tz=pytz.timezone("Europe/Berlin")
            ).date()
            types = int(termin["Abfallwert"])
            for art, name in arten.items():
                if types & art:
                    entries.append(Collection(date, name))

        return entries

+ 118 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/aucklandcouncil_govt_nz.py

@@ -0,0 +1,118 @@
+import datetime
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Auckland council"
+DESCRIPTION = "Source for Auckland council."
+URL = "https://aucklandcouncil.govt.nz"
+TEST_CASES = {
+    "429 Sea View Road": {"area_number": "12342453293"},  # Monday
+    "8 Dickson Road": {"area_number": "12342306525"},  # Thursday
+}
+
+MONTH = {
+    "January": 1,
+    "February": 2,
+    "March": 3,
+    "April": 4,
+    "May": 5,
+    "June": 6,
+    "July": 7,
+    "August": 8,
+    "September": 9,
+    "October": 10,
+    "November": 11,
+    "December": 12,
+}
+
+
def toDate(formattedDate):
    """Convert e.g. "Monday 5 September 2022" into a datetime.date."""
    _weekday, day, month, year = formattedDate.split()[:4]
    return datetime.date(int(year), MONTH[month], int(day))
+
+
+# Parser for <div> element with class wasteSearchResults
+class WasteSearchResultsParser(HTMLParser):
+    def __init__(self):
+        super().__init__()
+        self._entries = []
+        self._wasteType = None
+        self._withinWasteDateSpan = False
+        self._withinHouseholdDiv = False
+        self._withinRubbishLinks = False
+        self._todaysDate = None
+        self._workingWasteDate = None
+
+    @property
+    def entries(self):
+        return self._entries
+
+    def handle_endtag(self, tag):
+        if tag == "span" and self._withinWasteDateSpan:
+            self._withinWasteDateSpan = False
+        if tag == "div" and self._withinRubbishLinks:
+            self._withinRubbishLinks = False
+            self._workingWasteDate = None
+
+    def handle_starttag(self, tag, attrs):
+        if tag == "div":
+            d = dict(attrs)
+            id = d.get("id", "")
+            if id.endswith("HouseholdBlock"):
+                self._withinHouseholdDiv = True
+            if id.endswith("CommercialBlock"):
+                self._withinHouseholdDiv = False
+
+        if self._withinHouseholdDiv:
+            s = dict(attrs)
+            className = s.get("class", "")
+            if tag == "div":
+                if className == "links":
+                    self._withinRubbishLinks = True
+                else:
+                    self._withinRubbishLinks = False
+
+            if tag == "span":
+                if className.startswith("m-r-1"):
+                    self._withinWasteDateSpan = True
+
+                if self._workingWasteDate is not None:
+                    if className.startswith("icon-rubbish") or className.startswith(
+                        "icon-recycle"
+                    ):
+                        type = s["class"][5:]  # remove "icon-"
+                        self._entries.append(Collection(self._workingWasteDate, type))
+
+    def handle_data(self, data):
+        # date span comes first, doesn't have a year
+        if self._withinWasteDateSpan:
+            todays_date = datetime.date.today()
+            # use current year, unless Jan is in data, and we are still in Dec
+            year = todays_date.year
+            if "January" in data and todays_date.month == 12:
+                # then add 1
+                year = year + 1
+            fullDate = data + " " + f"{year}"
+            self._workingWasteDate = toDate(fullDate)
+
+
class Source:
    """Source that scrapes the Auckland Council collection-day detail page."""

    def __init__(
        self, area_number,
    ):
        # area_number: the council's "an" identifier for the property/area.
        self._area_number = area_number

    def fetch(self):
        # get token
        params = {"an": self._area_number}

        # NOTE(review): verify=False disables TLS certificate validation for a
        # public HTTPS site -- confirm whether this is actually required.
        r = requests.get(
            "https://www.aucklandcouncil.govt.nz/rubbish-recycling/rubbish-recycling-collections/Pages/collection-day-detail.aspx",
            params=params,
            verify=False,
        )
        p = WasteSearchResultsParser()
        p.feed(r.text)
        return p.entries

+ 93 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/avl_ludwigsburg_de.py

@@ -0,0 +1,93 @@
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "avl-ludwigsburg.de"
+DESCRIPTION = "Abfallverwertungsgesellschaft des Landkreises Ludwigsburg mbH"
+URL = "https://www.avl-ludwigsburg.de/privatkunden/termine/abfallkalender/suche/"
+
+TEST_CASES = {
+    "CityWithoutStreet": {"city": "Möglingen"},
+    "CityWithStreet": {"city": "Ludwigsburg", "street": "Bahnhofstraße"},
+}
+
+
class Source:
    """Source for AVL Ludwigsburg: submits the calendar search form and
    downloads the resulting ICS file."""

    def __init__(self, city, street=None):
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        """Search the AVL page for the configured city/street and follow its ICS link."""
        # Get the hidden parameters by loading the page
        session = requests.Session()
        r = session.get(URL)
        r.raise_for_status()

        soup = BeautifulSoup(r.text, features="html.parser")
        hidden_tags = soup.find_all("input", type="hidden")

        # Prepare data for the real web request
        data = {}
        for tag in hidden_tags:
            data[tag.get("name")] = tag.get("value")

        # Find the cities which do need a street name
        data_cities_with_streets = soup.find_all(
            "input", type="text", placeholder="Ort eingeben"
        )
        cities_with_streets = ""
        for tag in data_cities_with_streets:
            cities_with_streets += tag.get("data-cities-with-streets")
        cities_with_streets = cities_with_streets.split(",")

        data["tx_avlcollections_pi5[wasteCalendarLocationItem]"] = self._city
        data["tx_avlcollections_pi5[wasteCalendarStreetItem]"] = self._street

        # Remove some data which the webserver doesn't like
        for key in (
            "id",
            "tx_kesearch_pi1[page]",
            "tx_kesearch_pi1[resetFilters]",
            "tx_kesearch_pi1[sortByField]",
            "tx_kesearch_pi1[sortByDir]",
        ):
            data.pop(key, None)

        # Depending on the city remove the street from the data set
        if self._city.lower() not in cities_with_streets:
            data.pop("tx_avlcollections_pi5[wasteCalendarStreetItem]", None)

        # Get the final data
        r = session.post(URL, data=data)
        r.raise_for_status()

        if r.text.find("Ort konnte nicht gefunden werden.") != -1:
            raise Exception("Error: Ort konnte nicht gefunden werden.")

        # Bug fix: this branch previously raised the *city* error message
        # even though it detects a missing street.
        if r.text.find("Straße konnte nicht gefunden werden.") != -1:
            raise Exception("Error: Straße konnte nicht gefunden werden.")

        if r.text.find(".ics") == -1:
            raise Exception("Error: No ics link found.")

        soup = BeautifulSoup(r.text, features="html.parser")
        downloads = soup.find_all("a", href=True)
        ics_link = ""
        for download in downloads:
            link = download.get("href")
            if ".ics" in link:
                ics_link = link
        full_url = "https://www.avl-ludwigsburg.de" + ics_link
        return self.fetch_ics(full_url)

    def fetch_ics(self, url):
        """Download the ICS file at *url* and convert it to Collection entries."""
        r = requests.get(url)
        r.raise_for_status()

        # Parse ics file
        r.encoding = "utf-8"
        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries

+ 134 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py

@@ -0,0 +1,134 @@
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AW Harburg"
+DESCRIPTION = "Abfallwirtschaft Landkreis Harburg"
+URL = "https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/"
+
+TEST_CASES = {
+    "CityWithTwoLevels": {"level_1": "Hanstedt", "level_2": "Evendorf"},
+    "CityWithThreeLevels": {
+        "level_1": "Buchholz",
+        "level_2": "Buchholz mit Steinbeck (ohne Reindorf)",
+        "level_3": "Seppenser Mühlenweg Haus-Nr. 1 / 2",
+    },
+}
+
+HEADERS = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
+}
+
+
class Source:
    """Source for the Abfallwirtschaft Landkreis Harburg waste calendar.

    The website models districts as up to three cascading select boxes
    ("Ebenen").  Each configured level is resolved to its numeric ID,
    which is then used to locate the iCal download link.
    """

    def __init__(self, level_1, level_2, level_3=None):
        # District names by level, 0-indexed; the third level is optional.
        self._districts = [level_1, level_2, level_3]
        self._ics = ICS()

    def fetch(self):
        # Use a session to keep cookies and stuff
        session = requests.Session()

        # Double loading is on purpose because sometimes the webpage has an
        # overlay which is gone on the second try in a session
        r = session.get(URL, headers=HEADERS)
        if "Zur aufgerufenen Seite" in r.text:
            r = session.get(URL, headers=HEADERS)
        if r.status_code != 200:
            raise Exception(f"Error: failed to fetch first url: {URL}")

        # Resolve the first-level district to its ID.
        district_id = self.parse_level(r.text, 1)

        # Resolve the second-level district to its ID.
        url = (
            "https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html"
        )
        params = {
            "parent": district_id,
            "ebene": 1,
            "portal": 1,
            "selected_ebene": 0,
        }
        r = session.get(url, params=params, headers=HEADERS)
        if r.status_code != 200:
            raise Exception(f"Error: failed to fetch second url: {url}")

        district_id = self.parse_level(r.text, 2)

        # Resolve the third level - only if configured.
        if self._districts[2] is not None:
            params = {
                "parent": district_id,
                "ebene": 2,
                "portal": 1,
                "selected_ebene": 0,
            }
            r = session.get(url, params=params, headers=HEADERS)
            if r.status_code != 200:
                raise Exception(f"Error: failed to fetch third url: {url}")

            district_id = self.parse_level(r.text, 3)

        # Request the result page that contains the iCal download link.
        url = "https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html"
        params = {
            "selected_ebene": district_id,
            "owner": 20100,
        }
        r = session.get(url, params=params, headers=HEADERS)

        # Sometimes there is no garbage calendar available
        if "Es sind keine Abfuhrbezirke hinterlegt." in r.text:
            raise Exception(
                f'Error: "Es sind keine Abfuhrbezirke hinterlegt." for "{self._districts[2]}". Please use different input data.'
            )

        # The last link whose text contains " als iCal" wins.
        soup = BeautifulSoup(r.text, features="html.parser")
        links = soup.find_all("a")
        ical_url = ""
        for any_link in links:
            if " als iCal" in any_link.text:
                ical_url = any_link.get("href")

        if "ical.html" not in ical_url:
            raise Exception("No ical Link in the result: " + str(links))

        # Get the final data
        r = requests.get(ical_url, headers=HEADERS)
        if not r.ok:
            raise Exception(f"Error: failed to fetch url: {ical_url}")

        # Parse ics file
        dates = self._ics.convert(r.text)

        return [Collection(d[0], d[1]) for d in dates]

    def parse_level(self, response, level):
        """Return the ID of the configured district for *level* (1-based).

        Parses the ``strukturEbene<level>`` select box in *response* and
        maps option text to option value, skipping the placeholder entry.

        Raises:
            Exception: if the select box is empty or the configured
                district name is not among the options.
        """
        soup = BeautifulSoup(response, features="html.parser")
        select_content = soup.find_all("select", id=f"strukturEbene{level}")
        soup = BeautifulSoup(str(select_content), features="html.parser")
        options_content = soup.find_all("option")
        level_ids = {}
        for option in options_content:
            # Ignore the "Bitte wählen..." placeholder (value "0").
            if option.get("value") != "0":
                level_ids[option.text] = option.get("value")

        if level_ids == {}:
            raise Exception(f"Error: Level {level} Dictionary empty")

        if self._districts[level - 1] not in level_ids:
            # Fixed off-by-one: previously indexed [level], which raised
            # IndexError for level 3 and reported the wrong district name.
            raise Exception(
                f"Error: District {self._districts[level - 1]} is not in the dictionary: {level_ids}"
            )

        return level_ids[self._districts[level - 1]]

+ 54 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_bad_kreuznach_de.py

@@ -0,0 +1,54 @@
+import datetime
+import json
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "AWB Bad Kreuznach"
+DESCRIPTION = "Source for AWB Bad Kreuznach."
+URL = "https://app.awb-bad-kreuznach.de/"
+TEST_CASES = {
+    "Hargesheim": {"ort": "Hargesheim", "strasse": "Winzenheimer Straße", "nummer": 16}
+}
+
+TYPES = ("restmuell", "bio", "wert", "papier")
+
+
class Source:
    """Source for AWB Bad Kreuznach.

    The address is first geocoded via ``checkAddress.php``; the returned
    latitude/longitude is then used to query the collection dates.
    """

    def __init__(self, ort, strasse, nummer):
        self._ort = ort
        self._strasse = strasse
        self._nummer = nummer

    def fetch(self):
        args = {
            "ort": self._ort,
            "strasse": self._strasse,
            "nummer": self._nummer,
        }

        # Resolve the address to latitude/longitude.
        r = requests.post(
            "https://app.awb-bad-kreuznach.de/api/checkAddress.php", data=args
        )
        r.raise_for_status()
        data = r.json()

        # Query the collection dates for that position.
        del args["nummer"]
        args["mode"] = "web"
        args["lat"] = data["lat"]
        args["lon"] = data["lon"]
        r = requests.post(
            "https://app.awb-bad-kreuznach.de/api/loadDates.php", data=args
        )
        r.raise_for_status()
        data = r.json()

        entries = []
        for d in data["termine"]:
            date = datetime.date.fromisoformat(d["termin"])
            # A flag of "0" means this waste type is not collected that day.
            for waste_type in TYPES:
                if d[waste_type] != "0":
                    # Fixed: the date was previously also passed as the
                    # third (icon) argument of Collection.
                    entries.append(Collection(date, waste_type))

        return entries

+ 60 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_es_de.py

@@ -0,0 +1,60 @@
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfallwirtschaftsbetrieb Esslingen"
+DESCRIPTION = "Source for AWB Esslingen, Germany"
+URL = "https://www.awb-es.de"
+
+TEST_CASES = {
+    "Aichwald": {"city": "Aichwald", "street": "Alte Dorfstrasse"},
+    "Kohlberg": {"city": "Kohlberg", "street": "alle Straßen"},
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
+
+
class Source:
    """Source for AWB Esslingen, Germany.

    Loads the schedule page for the configured city/street and downloads
    the ICS calendar it links to.
    """

    def __init__(self, city, street=None):
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        session = requests.Session()

        params = {
            "city": self._city,
            "street": self._street,
            "direct": "true",
        }
        r = session.get(
            "https://www.awb-es.de/abfuhr/abfuhrtermine/__Abfuhrtermine.html",
            params=params,
        )
        r.raise_for_status()

        # The result page links the calendar export; use the first ICS link.
        soup = BeautifulSoup(r.text, features="html.parser")
        ics_url = None
        for download in soup.find_all("a", href=True):
            href = download.get("href")
            if "t=ics" in href:
                ics_url = href
                break

        if ics_url is None:
            # Fixed: was an f-string without placeholders.
            raise Exception("ics url not found")

        # get ics file
        r = session.get(ics_url, headers=HEADERS)
        r.raise_for_status()

        # parse ics file
        dates = self._ics.convert(r.text)

        return [Collection(d[0], d[1]) for d in dates]

+ 62 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_lm_de.py

@@ -0,0 +1,62 @@
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfallwirtschaftsbetrieb Limburg-Weilburg"
+DESCRIPTION = "Source for AWB Limburg-Weilburg, Germany"
+URL = "https://www.awb-lm.de/"
+
+TEST_CASES = {
+    "Bad Camberg - Schillerstr.": { "district":  1, "city": 47, "street": 1384},
+    "Limburg - Goethestr.": { "district":  9, "city": 52, "street": 1538}
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
+
+
class Source:
    """Source for AWB Limburg-Weilburg, Germany.

    ``district``, ``city`` and ``street`` are the numeric IDs used by the
    website's ``abfuhrtermine.php`` generator.
    """

    def __init__(self, district, city, street=None):
        self._district = district
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        session = requests.Session()

        params = {
            "Abfuhrbezirk": self._district,
            "Ortschaft": self._city,
            "Strasse": self._street,
        }

        # Use the session consistently (previously the first request used
        # requests.post directly, so cookies were not carried over).
        r = session.post(
            "https://www.awb-lm.de/generator/abfuhrtermine.php",
            data=params,
        )
        r.raise_for_status()

        # The last matching link on the page points to the cached iCal file.
        soup = BeautifulSoup(r.text, features="html.parser")
        ics_url = None
        for download in soup.find_all("a", href=True):
            href = download.get("href")
            if "cache/ical" in href:
                ics_url = href

        if ics_url is None:
            # Fixed: was an f-string without placeholders.
            raise Exception("ics url not found")

        # get ics file
        r = session.get("https://www.awb-lm.de" + ics_url, headers=HEADERS)
        r.raise_for_status()

        # parse ics file
        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            # The summary carries a trailing " am <date>" part; drop it.
            entries.append(Collection(d[0], d[1].split(" am ")[0]))
        return entries

+ 49 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_oldenburg_de.py

@@ -0,0 +1,49 @@
+import urllib
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AWB Oldenburg"
+DESCRIPTION = "Source for 'Abfallwirtschaftsbetrieb Stadt Oldenburg (Oldb)'."
+URL = "https://services.oldenburg.de/index.php"
+TEST_CASES = {
+    "Polizeiinspektion Oldenburg": {"street": "Friedhofsweg", "house_number": 30}
+}
+
+
class Source:
    """Source for 'Abfallwirtschaftsbetrieb Stadt Oldenburg (Oldb)'.

    Requests the ICS export directly from the TYPO3 endpoint; the service
    expects spaces encoded as '%20' rather than '+', hence the manual
    urlencode below.
    """

    def __init__(self, street, house_number):
        self._street = street
        self._house_number = house_number
        # Regex captures the summary text before a ":" that is followed by "!".
        self._ics = ICS(regex=r"(.*)\:\s*\!")

    def fetch(self):

        args = {
            "id": 430,
            "tx_citkoabfall_abfallkalender[strasse]": str(self._street).encode("utf-8"),
            "tx_citkoabfall_abfallkalender[hausnummer]": str(self._house_number).encode(
                "utf-8"
            ),
            # 58..61 are the service's IDs for the four waste types.
            "tx_citkoabfall_abfallkalender[abfallarten][0]": 61,
            "tx_citkoabfall_abfallkalender[abfallarten][1]": 60,
            "tx_citkoabfall_abfallkalender[abfallarten][2]": 59,
            "tx_citkoabfall_abfallkalender[abfallarten][3]": 58,
            "tx_citkoabfall_abfallkalender[action]": "ics",
            "tx_citkoabfall_abfallkalender[controller]": "FrontendIcs",
        }

        # use '%20' instead of '+' in URL
        # https://stackoverflow.com/questions/21823965/use-20-instead-of-for-space-in-python-query-parameters
        args = urllib.parse.urlencode(args, quote_via=urllib.parse.quote)

        # GET request (the old comment wrongly said "post"); fail loudly on
        # HTTP errors instead of feeding an error page to the ICS parser.
        r = requests.get(URL, params=args)
        r.raise_for_status()

        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries

+ 40 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awbkoeln_de.py

@@ -0,0 +1,40 @@
+import datetime
+import json
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "AWB Köln"
+DESCRIPTION = "Source for Abfallwirtschaftsbetriebe Köln waste collection."
+URL = "https://www.awbkoeln.de"
+TEST_CASES = {"Koeln": {"street_code": 2, "building_number": 50}}
+
+
class Source:
    """Source for Abfallwirtschaftsbetriebe Köln waste collection.

    Queries the public JSON calendar API for a 12-month window starting
    at the current month.
    """

    def __init__(self, street_code, building_number):
        self._street_code = street_code
        self._building_number = building_number

    def fetch(self):
        now = datetime.datetime.now()
        args = {
            "street_code": self._street_code,
            "building_number": self._building_number,
            # One year of data, starting with the current month.
            "start_year": now.year,
            "start_month": now.month,
            "end_year": now.year + 1,
            "end_month": now.month,
        }

        # get json file
        r = requests.get("https://www.awbkoeln.de/api/calendar", params=args)
        r.raise_for_status()
        data = r.json()

        entries = []
        for d in data["data"]:
            date = datetime.date(year=d["year"], month=d["month"], day=d["day"])
            entries.append(Collection(date, d["type"]))

        return entries

+ 134 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awido_de.py

@@ -0,0 +1,134 @@
+import datetime
+import json
+import logging
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "AWIDO"
+DESCRIPTION = "Source for AWIDO waste collection."
+URL = "https://www.awido-online.de/"
+TEST_CASES = {
+    "Schorndorf, Miedelsbacher Straße 30 /1": {
+        "customer": "rmk",
+        "city": "Schorndorf",
+        "street": "Miedelsbacher Straße",
+        "housenumber": "30 /1",
+    },
+    "Altomünster, Maisbrunn": {
+        "customer": "lra-dah",
+        "city": "Altomünster",
+        "street": "Maisbrunn",
+    },
+    "SOK-Alsmannsdorf": {"customer": "zaso", "city": "SOK-Alsmannsdorf"},
+    "Kaufbeuren, Rehgrund": {
+        "customer": "kaufbeuren",
+        "city": "Kaufbeuren",
+        "street": "Rehgrund",
+    },
+    "Tübingen, Dettenhausen": {"customer": "tuebingen", "city": "Dettenhausen"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for AWIDO waste collection.

    Resolution chain: city -> (optional) street -> (optional) house
    number.  Each step maps the configured name to an AWIDO OID; the
    final OID selects the calendar data to download.
    """

    def __init__(self, customer, city, street=None, housenumber=None):
        self._customer = customer
        self._city = city
        self._street = street
        self._housenumber = housenumber

    def _fetch_value_key_map(self, url):
        """Return a {stripped value: key} map for an AWIDO list endpoint."""
        r = requests.get(url, params={"client": self._customer})
        items = json.loads(r.text)
        return {item["value"].strip(): item["key"] for item in items}

    def fetch(self):
        # Retrieve list of places
        r = requests.get(
            f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getPlaces/client={self._customer}"
        )
        places = json.loads(r.text)

        # create city to key map from retrieved places
        city_to_oid = {place["value"].strip(): place["key"] for place in places}

        if self._city not in city_to_oid:
            _LOGGER.error(f"city not found: {self._city}")
            return []

        oid = city_to_oid[self._city]

        # The street lookup was previously duplicated in both branches;
        # fetch it once and branch on the configuration afterwards.
        street_to_oid = self._fetch_value_key_map(
            f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getGroupedStreets/{oid}"
        )

        if self._street is None:
            # Test whether the city name doubles as the street name;
            # otherwise keep using the city OID.
            self._street = self._city
            if self._street in street_to_oid:
                oid = street_to_oid[self._street]
        else:
            # street specified
            if self._street not in street_to_oid:
                _LOGGER.error(f"street not found: {self._street}")
                return []

            oid = street_to_oid[self._street]

            if self._housenumber is not None:
                hsnbr_to_oid = self._fetch_value_key_map(
                    f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getStreetAddons/{oid}"
                )

                if self._housenumber not in hsnbr_to_oid:
                    _LOGGER.error(f"housenumber not found: {self._housenumber}")
                    return []

                oid = hsnbr_to_oid[self._housenumber]

        # get calendar data
        r = requests.get(
            f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getData/{oid}",
            params={"fractions": "", "client": self._customer},
        )
        cal_json = json.loads(r.text)

        # map fraction code to fraction name
        fractions = {fract["snm"]: fract["nm"] for fract in cal_json["fracts"]}

        # calendar also contains public holidays. In this case, 'ad' is None
        calendar = [item for item in cal_json["calendar"] if item["ad"] is not None]

        entries = []
        for calitem in calendar:
            date = datetime.datetime.strptime(calitem["dt"], "%Y%m%d").date()

            # add all fractions for this date
            for fracitem in calitem["fr"]:
                entries.append(Collection(date, fractions[fracitem]))

        return entries

+ 117 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awn_de.py

@@ -0,0 +1,117 @@
+from html.parser import HTMLParser
+
+import requests
+
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AWN"
+DESCRIPTION = "Source for AWN (Abfallwirtschaft Neckar-Odenwald-Kreis)."
+URL = "https://www.awn-online.de"
+TEST_CASES = {
+    "Adelsheim": {
+        "city": "Adelsheim",
+        "street": "Badstr.",
+        "house_number": 1,
+    },
+    "Mosbach": {
+        "city": "Mosbach",
+        "street": "Hauptstr.",
+        "house_number": 53,
+        "address_suffix": "/1",
+    },
+    "Billigheim": {
+        "city": "Billigheim",
+        "street": "Marienhöhe",
+        "house_number": 5,
+        "address_suffix": "A",
+    },
+}
+SERVLET = (
+    "https://athos.awn-online.de/WasteManagementNeckarOdenwald/WasteManagementServlet"
+)
+
+# Parser for HTML input (hidden) text
class HiddenInputParser(HTMLParser):
    """Collect the name/value pairs of all hidden ``<input>`` fields.

    Used to echo the server's hidden form state back in subsequent
    POST requests.
    """

    def __init__(self):
        super().__init__()
        self._args = {}

    @property
    def args(self):
        # Dict of hidden-input name -> value ("" when no value attribute).
        return self._args

    def handle_starttag(self, tag, attrs):
        if tag != "input":
            return
        d = dict(attrs)
        # Fixed: inputs without an explicit type attribute used to raise
        # KeyError; HTML treats them as type "text", so just skip them.
        if str(d.get("type", "")).lower() == "hidden":
            self._args[d["name"]] = d["value"] if "value" in d else ""
+
+
class Source:
    """Source for AWN (Abfallwirtschaft Neckar-Odenwald-Kreis).

    Drives the Athos WasteManagement servlet like a browser: load the
    form, submit the address, select all containers, download the ICS.
    """

    def __init__(
        self, city: str, street: str, house_number: int, address_suffix: str = ""
    ):
        self._city = city
        self._street = street
        self._hnr = house_number
        self._suffix = address_suffix
        self._ics = ICS()

    def fetch(self):
        session = requests.session()

        r = session.get(
            SERVLET,
            params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
        )
        r.raise_for_status()
        r.encoding = "utf-8"

        # Collect the hidden form fields to echo back on submit.
        parser = HiddenInputParser()
        parser.feed(r.text)

        args = parser.args
        args["Ort"] = self._city
        args["Strasse"] = self._street
        args["Hausnummer"] = str(self._hnr)
        args["Hausnummerzusatz"] = self._suffix
        args["SubmitAction"] = "CITYCHANGED"
        r = session.post(
            SERVLET,
            data=args,
        )
        r.raise_for_status()

        # Select all ten container types at once (was ten copy-pasted lines).
        args["SubmitAction"] = "forward"
        for i in range(1, 11):
            args[f"ContainerGewaehlt_{i}"] = "on"
        r = session.post(
            SERVLET,
            data=args,
        )
        r.raise_for_status()

        # Download the collection dates as an ICS file.
        args["ApplicationName"] = "com.athos.nl.mvc.abfterm.AbfuhrTerminModel"
        args["SubmitAction"] = "filedownload_ICAL"
        r = session.post(
            SERVLET,
            data=args,
        )
        r.raise_for_status()

        dates = self._ics.convert(r.text)

        return [Collection(d[0], d[1]) for d in dates]

+ 76 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awr_de.py

@@ -0,0 +1,76 @@
+import json
+import logging
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AWR"
+DESCRIPTION = "Source for Abfallwirtschaft Rendsburg"
+URL = "https://www.awr.de"
+TEST_CASES = {
+    "Rendsburg": {"city": "Rendsburg", "street": "Hindenburgstraße"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Abfallwirtschaft Rendsburg.

    Uses the awr.de REST API: city name -> city ID, street name ->
    street ID, then downloads one ICS calendar covering all waste types.
    """

    def __init__(self, city, street):
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        # retrieve list of cities
        r = requests.get("https://www.awr.de/api_v2/collection_dates/1/orte")
        r.raise_for_status()
        cities = r.json()

        # create city to id map from retrieved cities
        city_to_id = {
            city["ortsbezeichnung"]: city["ortsnummer"] for city in cities["orte"]
        }

        if self._city not in city_to_id:
            _LOGGER.error(f"city not found: {self._city}")
            return []

        cityId = city_to_id[self._city]

        # retrieve list of streets
        r = requests.get(
            f"https://www.awr.de/api_v2/collection_dates/1/ort/{cityId}/strassen"
        )
        r.raise_for_status()
        streets = r.json()

        # create street to id map from retrieved streets
        street_to_id = {
            street["strassenbezeichnung"]: street["strassennummer"]
            for street in streets["strassen"]
        }

        if self._street not in street_to_id:
            _LOGGER.error(f"street not found: {self._street}")
            return []

        streetId = street_to_id[self._street]

        # retrieve list of waste types; request all of them in one calendar
        r = requests.get(
            f"https://www.awr.de/api_v2/collection_dates/1/ort/{cityId}/abfallarten"
        )
        r.raise_for_status()
        waste_types = r.json()
        wt = "-".join(t["id"] for t in waste_types["abfallarten"])

        # get ics file
        r = requests.get(
            f"https://www.awr.de/api_v2/collection_dates/1/ort/{cityId}/strasse/{streetId}/hausnummern/0/abfallarten/{wt}/kalender.ics"
        )
        r.raise_for_status()

        dates = self._ics.convert(r.text)

        return [Collection(d[0], d[1]) for d in dates]

+ 75 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/awsh_de.py

@@ -0,0 +1,75 @@
+import json
+import logging
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "AWSH"
+DESCRIPTION = "Source for Abfallwirtschaft Südholstein"
+URL = "https://www.awsh.de"
+TEST_CASES = {
+    "Reinbek": {"city": "Reinbek", "street": "Ahornweg"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Abfallwirtschaft Südholstein.

    Uses the awsh.de REST API: city name -> city ID, street name ->
    street ID, then downloads one ICS calendar covering all waste types.
    """

    def __init__(self, city, street):
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        # retrieve list of cities
        r = requests.get("https://www.awsh.de/api_v2/collection_dates/1/orte")
        r.raise_for_status()
        cities = r.json()

        # create city to id map from retrieved cities
        city_to_id = {
            city["ortsbezeichnung"]: city["ortsnummer"] for city in cities["orte"]
        }

        if self._city not in city_to_id:
            _LOGGER.error(f"city not found: {self._city}")
            return []

        cityId = city_to_id[self._city]

        # retrieve list of streets
        r = requests.get(
            f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/strassen"
        )
        r.raise_for_status()
        streets = r.json()

        # create street to id map from retrieved streets
        street_to_id = {
            street["strassenbezeichnung"]: street["strassennummer"]
            for street in streets["strassen"]
        }

        if self._street not in street_to_id:
            _LOGGER.error(f"street not found: {self._street}")
            return []

        streetId = street_to_id[self._street]

        # retrieve list of waste types; request all of them in one calendar
        r = requests.get(
            f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/abfallarten"
        )
        r.raise_for_status()
        waste_types = r.json()
        wt = "-".join(t["id"] for t in waste_types["abfallarten"])

        # get ics file
        r = requests.get(
            f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/strasse/{streetId}/hausnummern/0/abfallarten/{wt}/kalender.ics"
        )
        r.raise_for_status()

        dates = self._ics.convert(r.text)

        return [Collection(d[0], d[1]) for d in dates]

+ 134 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/banyule_vic_gov_au.py

@@ -0,0 +1,134 @@
+import logging
+import re
+import typing
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+
+TITLE = 'Banyule City Council'
+DESCRIPTION = 'Source for Banyule City Council rubbish collection.'
+URL = 'https://www.banyule.vic.gov.au/binday'
+TEST_CASES = {
+    'Monday A': {'street_address': '6 Mandall Avenue, IVANHOE'},
+    'Monday A Geolocation ID': {'geolocation_id': '4f7ebfca-1526-4363-8b87-df3103a10a87'},
+    'Monday B': {'street_address': '10 Burke Road North, IVANHOE EAST'},
+    'Thursday A': {'street_address': '255 St Helena Road, GREENSBOROUGH'},
+    'Thursday B': {'street_address': '35 Para Road, MONTMORENCY'}
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+ICON_MAP = {
+    'green waste': 'mdi:leaf',
+    'recycling': 'mdi:recycle'
+}
+
+
class SourceConfigurationError(ValueError):
    """Raised when the source is configured without a usable address or ID."""
    pass
+
+
class SourceParseError(ValueError):
    """Raised when a server response cannot be parsed into useful data."""
    pass
+
+
class Source:
    """Source for Banyule City Council rubbish collection.

    Accepts either a free-text street address (resolved to a geolocation
    ID on first use via the council's search API) or a known geolocation
    ID directly.
    """

    # Address search endpoint: resolves free text to a geolocation ID.
    OC_GEOLOCATION_SEARCH_URL = 'https://www.banyule.vic.gov.au/api/v1/myarea/search'

    # Page visited first to obtain the session cookie the calendar API needs.
    OC_SESSION_URL = 'https://www.banyule.vic.gov.au/Waste-environment/Waste-recycling/Bin-collection-services'
    OC_CALENDAR_URL = 'https://www.banyule.vic.gov.au/ocapi/Public/myarea/wasteservices'

    # Captures the d/m/Y date following a leading word (e.g. a weekday name).
    OC_RE_DATE_STR = re.compile(r'[^\s]+\s(\d{1,2}/\d{1,2}/\d{4})')

    def __init__(self, street_address: typing.Optional[str] = None, geolocation_id: typing.Optional[str] = None):
        """Store the lookup parameters; at least one of them must be given."""
        if street_address is None and geolocation_id is None:
            raise SourceConfigurationError('Either street_address or geolocation_id must have a value')

        self._street_address = street_address
        self._geolocation_id = geolocation_id

    @property
    def geolocation_id(self) -> str:
        """Geolocation ID for the configured address.

        Resolved lazily through the address search API on first access,
        then cached on the instance for later calls.

        Raises:
            SourceParseError: if the search fails or returns no usable ID.
        """
        if self._geolocation_id is None:
            # Search for geolocation ID
            geolocation_response = requests.get(
                self.OC_GEOLOCATION_SEARCH_URL,
                params={
                    'keywords': self._street_address,
                    'maxresults': 1
                }
            )
            geolocation_response.raise_for_status()

            # Pull ID from results
            geolocation_result = geolocation_response.json()
            _LOGGER.debug(f"Search response: {geolocation_response!r}")

            if 'success' in geolocation_result and not geolocation_result['success']:
                raise SourceParseError('Unspecified server-side error when searching address')

            if 'Items' not in geolocation_result or \
                    geolocation_result['Items'] is None or \
                    len(geolocation_result['Items']) < 1:
                raise SourceParseError('Expected list of locations from address search, got empty or missing list')

            # Only the first (best) match is used; maxresults=1 above.
            geolocation_data = geolocation_result['Items'][0]

            if 'Id' not in geolocation_data:
                raise SourceParseError('Location in address search result but missing geolocation ID')

            self._geolocation_id = geolocation_data['Id']
            _LOGGER.info(f"Address {self._street_address} mapped to geolocation ID {self._geolocation_id}")

        return self._geolocation_id

    def fetch(self) -> typing.List[Collection]:
        """Return upcoming collections parsed from the council calendar API."""
        # Calendar lookup cares about a cookie, so a Session must be used
        calendar_session = requests.Session()

        calendar_request = calendar_session.get(self.OC_SESSION_URL)
        calendar_request.raise_for_status()

        calendar_request = calendar_session.get(
            self.OC_CALENDAR_URL,
            params={
                'geolocationid': self.geolocation_id,
                'ocsvclang': 'en-AU'
            }
        )
        calendar_request.raise_for_status()

        calendar_result = calendar_request.json()
        _LOGGER.debug(f"Calendar response: {calendar_result!r}")

        if 'success' in calendar_result and not calendar_result['success']:
            raise SourceParseError('Unspecified server-side error when getting calendar')

        # Extract entries from bundled HTML
        calendar_parser = BeautifulSoup(calendar_result['responseContent'], 'html.parser')

        pickup_entries = []

        # Each <article> describes one waste stream with its next service date.
        for element in calendar_parser.find_all('article'):
            _LOGGER.debug(f"Parsing collection: {element!r}")

            waste_type = element.h3.string

            # Extract and parse collection date
            waste_date_match = self.OC_RE_DATE_STR.match(element.find(class_='next-service').string.strip())

            # Articles whose date text does not match the pattern are skipped.
            if waste_date_match is None:
                continue

            waste_date = datetime.strptime(waste_date_match[1], '%d/%m/%Y').date()

            # Base icon on type
            waste_icon = ICON_MAP.get(waste_type.lower(), 'mdi:trash-can')

            pickup_entries.append(Collection(waste_date, waste_type, waste_icon))
            _LOGGER.info(f"Collection for {waste_type} (icon: {waste_icon}) on {waste_date}")

        return pickup_entries

+ 68 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/belmont_wa_gov_au.py

@@ -0,0 +1,68 @@
+import datetime
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Belmont City Council"
+DESCRIPTION = "Source for Belmont City Council rubbish collection."
+URL = "https://www.belmont.wa.gov.au/"
+TEST_CASES = {
+    "PETstock Belmont": {"address": "196 Abernethy Road Belmont 6104"},
+    "Belgravia Medical Centre": {"address": "374 Belgravia Street Cloverdale 6105"},
+    "IGA Rivervale": {"address": "126 Kooyong Road Rivervale 6103"},
+}
+
+
+class Source:
+    def __init__(self, address: str):
+        self._address = address
+
+    def fetch(self):
+        params = {"key": self._address}
+        r = requests.get(
+            "https://www.belmont.wa.gov.au/api/intramaps/getaddresses", params=params
+        )
+        r.raise_for_status()
+        j = r.json()
+
+        if len(j) == 0:
+            raise Exception("address not found")
+
+        if len(j) > 1:
+            raise Exception("multiple addresses found")
+
+        params = {"mapkey": j[0]["mapkey"], "dbkey": j[0]["dbkey"]}
+        r = requests.get(
+            "https://www.belmont.wa.gov.au/api/intramaps/getpropertydetailswithlocalgov",
+            params=params,
+        )
+        r.raise_for_status()
+        data = r.json()["data"]
+
+        entries = []
+
+        # get general waste
+        date = datetime.datetime.strptime(
+            data["BinDayGeneralWasteFormatted"], "%Y-%m-%dT%H:%M:%S"
+        ).date()
+        entries.append(
+            Collection(
+                date=date,
+                t="General Waste",
+                icon="mdi:trash-can",
+            )
+        )
+
+        # get recycling
+        date = datetime.datetime.strptime(
+            data["BinDayRecyclingFormatted"], "%Y-%m-%dT%H:%M:%S"
+        ).date()
+        entries.append(
+            Collection(
+                date=date,
+                t="Recycling",
+                icon="mdi:recycle",
+            )
+        )
+
+        return entries

+ 91 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py

@@ -0,0 +1,91 @@
+import json
+from datetime import datetime
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Berline Recycling"
+DESCRIPTION = "Source for Berlin Recycling waste collection."
+URL = "https://berlin-recycling.de"
+TEST_CASES = {
+    "Germanenstrasse": {
+        "username": "!secret berlin_recycling_username",
+        "password": "!secret berlin_recycling_password",
+    },
+}
+
+
+# Parser for HTML input (hidden) text
+class HiddenInputParser(HTMLParser):
+    def __init__(self):
+        super().__init__()
+        self._args = {}
+
+    @property
+    def args(self):
+        return self._args
+
+    def handle_starttag(self, tag, attrs):
+        if tag == "input":
+            d = dict(attrs)
+            if str(d["type"]).lower() == "hidden":
+                self._args[d["name"]] = d["value"] if "value" in d else ""
+
+
+SERVICE_URL = "https://kundenportal.berlin-recycling.de/"
+
+
+class Source:
+    """Scrape the Berlin Recycling customer portal for pickup dates."""
+
+    def __init__(self, username, password):
+        self._username = username
+        self._password = password
+
+    def fetch(self):
+        """Log in to the portal and return its collection calendar.
+
+        The portal redirects to a session-specific URL; the hidden form
+        fields of that page must be echoed back with the credentials.
+        """
+        session = requests.session()
+
+        # first get returns session specific url
+        # NOTE(review): verify=False disables TLS certificate checking —
+        # confirm whether this is still required for this host.
+        r = session.get(SERVICE_URL, allow_redirects=False, verify=False)
+
+        # get session id's
+        r = session.get(r.url)
+
+        # Echo all hidden form fields back, plus the login credentials.
+        parser = HiddenInputParser()
+        parser.feed(r.text)
+        args = parser.args
+        args["__EVENTTARGET"] = "btnLog"
+        args["__EVENTARGUMENT"] = None
+        args["Username"] = self._username
+        args["Password"] = self._password
+
+        # login
+        r = session.post(r.url, data=args)
+        serviceUrl = r.url
+
+        # Presumably the dashboard must be opened and the dataset table
+        # switched before the calendar query works — verify if refactoring.
+        request_data = {"withhtml": "true"}
+        r = session.post(serviceUrl + "/GetDashboard", json=request_data)
+
+        request_data = {"datasettable": "ENWIS_ABFUHRKALENDER"}
+        r = session.post(serviceUrl + "/ChangeDatasetTable", json=request_data)
+
+        request_data = {
+            "datasettablecode": "ENWIS_ABFUHRKALENDER",
+            "startindex": 0,
+            "searchtext": "",
+            "rangefilter": "",
+            "ordername": "",
+            "orderdir": "",
+            "ClientParameters": "",
+            "headrecid": "",
+        }
+        r = session.post(serviceUrl + "/GetDatasetTableHead", json=request_data)
+
+        data = json.loads(r.text)
+        # load json again, because response is double coded
+        data = json.loads(data["d"])
+
+        entries = []
+        for d in data["data"]:
+            date = datetime.strptime(d["Task Date"], "%Y-%m-%d").date()
+            entries.append(Collection(date, d["Material Description"]))
+        return entries

+ 103 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/bielefeld_de.py

@@ -0,0 +1,103 @@
+from html.parser import HTMLParser
+
+import requests
+
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Bielefeld"
+DESCRIPTION = "Source for Stadt Bielefeld."
+URL = "https://bielefeld.de"
+TEST_CASES = {
+    "Umweltbetrieb": {
+        "street": " Eckendorfer Straße",
+        "house_number": 57,
+    },
+}
+SERVLET = (
+    "https://anwendungen.bielefeld.de/WasteManagementBielefeld/WasteManagementServlet"
+)
+
+# Parser for HTML input (hidden) text
+class HiddenInputParser(HTMLParser):
+    def __init__(self):
+        super().__init__()
+        self._args = {}
+
+    @property
+    def args(self):
+        return self._args
+
+    def handle_starttag(self, tag, attrs):
+        if tag == "input":
+            d = dict(attrs)
+            if str(d["type"]).lower() == "hidden":
+                self._args[d["name"]] = d["value"] if "value" in d else ""
+
+
+class Source:
+    """Scrape collection dates from the Bielefeld waste-management servlet."""
+
+    def __init__(
+        self, street: str, house_number: int, address_suffix: str = ""
+    ):
+        self._street = street
+        self._hnr = house_number
+        self._suffix = address_suffix
+        self._ics = ICS()
+
+    def fetch(self):
+        """Drive the servlet's form workflow and download the ICS calendar.
+
+        The hidden fields of the first page are echoed back on every
+        subsequent POST; the SubmitAction field advances the wizard.
+        """
+        session = requests.session()
+
+        r = session.get(
+            SERVLET,
+            params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
+        )
+        r.raise_for_status()
+        r.encoding = "utf-8"
+
+        # Collect hidden form fields so they can be posted back.
+        parser = HiddenInputParser()
+        parser.feed(r.text)
+
+        args = parser.args
+        # NOTE(review): "Ort" is set to the street's first character —
+        # looks odd but matches the servlet's behavior; confirm before changing.
+        args["Ort"] = self._street[0]
+        args["Strasse"] = self._street
+        args["Hausnummer"] = str(self._hnr)
+        args["Hausnummerzusatz"] = self._suffix
+        args["SubmitAction"] = "CITYCHANGED"
+        args["ApplicationName"] = "com.athos.kd.bielefeld.CheckAbfuhrTermineParameterBusinessCase"
+        # Request all four container types.
+        args["ContainerGewaehlt_1"] = "on"
+        args["ContainerGewaehlt_2"] = "on"
+        args["ContainerGewaehlt_3"] = "on"
+        args["ContainerGewaehlt_4"] = "on"
+        r = session.post(
+            SERVLET,
+            data=args,
+        )
+        r.raise_for_status()
+
+        # Advance the wizard to the results page.
+        args["SubmitAction"] = "forward"
+        r = session.post(
+            SERVLET,
+            data=args,
+        )
+        r.raise_for_status()
+
+        # Reminder settings embedded into the generated iCal file.
+        reminder_day = "keine Erinnerung" # "keine Erinnerung", "am Vortag", "2 Tage vorher", "3 Tage vorher"
+        reminder_time = "18:00 Uhr" # "XX:00 Uhr"
+
+        # Trigger the iCal download.
+        args["ApplicationName"] = "com.athos.kd.bielefeld.AbfuhrTerminModel"
+        args["SubmitAction"] = "filedownload_ICAL"
+        args["ICalErinnerung"] = reminder_day
+        args["ICalZeit"] = reminder_time
+        r = session.post(
+            SERVLET,
+            data=args,
+        )
+        r.raise_for_status()
+
+        # Convert the ICS payload into (date, type) tuples.
+        dates = self._ics.convert(r.text)
+
+        entries = []
+        for d in dates:
+            entries.append(Collection(d[0], d[1]))
+
+        return entries

+ 109 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py

@@ -0,0 +1,109 @@
+import logging
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "BMV.at"
+DESCRIPTION = "Source for BMV, Austria"
+URL = "https://www.bmv.at"
+TEST_CASES = {
+    "Allersdorf": {"ort": "ALLERSDORF", "strasse": "HAUSNUMMER", "hausnummer": 9},
+    "Bad Sauerbrunn": {
+        "ort": "BAD SAUERBRUNN",
+        "strasse": "BUCHINGERWEG",
+        "hausnummer": 16,
+    },
+    "Rattersdorf": {
+        "ort": "RATTERSDORF",
+        "strasse": "SIEBENBRÜNDLGASSE",
+        "hausnummer": 30,
+    },
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
+# Parser for HTML input (hidden) text
+class HiddenInputParser(HTMLParser):
+    def __init__(self):
+        super().__init__()
+        self._args = {}
+
+    @property
+    def args(self):
+        return self._args
+
+    def handle_starttag(self, tag, attrs):
+        if tag == "input":
+            d = dict(attrs)
+            if d["type"] == "HIDDEN":
+                self._args[d["name"]] = d.get("value")
+
+
+class Source:
+    """Scrape waste pickup dates from the UDB waste-management servlet."""
+
+    def __init__(self, ort, strasse, hausnummer):
+        self._ort = ort
+        self._strasse = strasse
+        self._hausnummer = hausnummer
+        self._ics = ICS()
+
+    def fetch(self):
+        """Walk the servlet's form wizard and download the ICS calendar.
+
+        The form is filled in the same order a browser would use:
+        Ort, then Strasse, then Hausnummer, then the iCal download.
+        """
+        session = requests.session()
+
+        r = session.get(
+            "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE"
+        )
+
+        # add all hidden input fields to form data
+        p = HiddenInputParser()
+        p.feed(r.text)
+        args = p.args
+
+        # Step 1: select the municipality (street/number still placeholders).
+        args["Focus"] = "Ort"
+        args["SubmitAction"] = "changedEvent"
+        args["Ort"] = self._ort
+        args["Strasse"] = "HAUSNUMMER"
+        args["Hausnummer"] = 0
+        r = session.post(
+            "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+        )
+
+        # Step 2: select the street.
+        args["Focus"] = "Strasse"
+        args["SubmitAction"] = "changedEvent"
+        args["Ort"] = self._ort
+        args["Strasse"] = self._strasse
+        args["Hausnummer"] = 0
+        r = session.post(
+            "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+        )
+
+        # Step 3: select the house number and advance to the result page.
+        args["Focus"] = "Hausnummer"
+        args["SubmitAction"] = "forward"
+        args["Ort"] = self._ort
+        args["Strasse"] = self._strasse
+        args["Hausnummer"] = self._hausnummer
+        r = session.post(
+            "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+        )
+
+        # Step 4: request the iCal export; the address fields are removed
+        # again for this final action.
+        args["ApplicationName"] = "com.athos.kd.udb.AbfuhrTerminModel"
+        args["Focus"] = None
+        args["IsLastPage"] = "true"
+        args["Method"] = "POST"
+        args["PageName"] = "Terminliste"
+        args["SubmitAction"] = "filedownload_ICAL"
+        del args["Ort"]
+        del args["Strasse"]
+        del args["Hausnummer"]
+        r = session.post(
+            "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+        )
+
+        # Convert the ICS payload into (date, type) tuples.
+        dates = self._ics.convert(r.text)
+
+        entries = []
+        for d in dates:
+            entries.append(Collection(d[0], d[1]))
+        return entries

+ 96 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/bradford_gov_uk.py

@@ -0,0 +1,96 @@
+import json
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+from bs4 import BeautifulSoup
+from urllib.parse import urlparse
+import logging
+import http.client as http_client
+import ssl
+import urllib3
+
+TITLE = "Bradford.gov.uk"
+DESCRIPTION = (
+    "Source for Bradford.gov.uk services for Bradford Metropolitan Council, UK."
+)
+URL = "https://onlineforms.bradford.gov.uk/ufs/"
+TEST_CASES = {
+    "Ilkley": {"uprn": "100051250665"},
+    "Bradford": {"uprn": "100051239296"},
+    "Baildon": {"uprn": "10002329242"},
+}
+
+ICONS = {
+    "REFUSE": "mdi:trash-can",
+    "RECYCLING": "mdi:recycle",
+    "GARDEN": "mdi:leaf",
+}
+
+from pprint import pprint
+
+class CustomHttpAdapter (requests.adapters.HTTPAdapter):
+    '''Transport adapter" that allows us to use custom ssl_context.'''
+
+    def __init__(self, ssl_context=None, **kwargs):
+        self.ssl_context = ssl_context
+        super().__init__(**kwargs)
+
+    def init_poolmanager(self, connections, maxsize, block=False):
+        self.poolmanager = urllib3.poolmanager.PoolManager(
+            num_pools=connections, maxsize=maxsize,
+            block=block, ssl_context=self.ssl_context)
+
+class Source:
+    """Scrape collection dates from Bradford Council's online-forms site."""
+
+    def __init__(self, uprn: str):
+        # UPRN (Unique Property Reference Number) identifying the property.
+        self._uprn = uprn
+
+    def fetch(self):
+        """Fetch and parse the collection-dates page for the UPRN.
+
+        The property is selected by placing the UPRN in the
+        COLLECTIONDATES cookie before requesting the page.
+        """
+        entries = []
+
+        s = requests.Session()
+        # In openssl3 some context is needed to access this host
+        # or an UNSAFE_LEGACY_RENEGOTIATION_DISABLED error will occur
+        # (0x4 is OP_LEGACY_SERVER_CONNECT, re-enabling legacy renegotiation).
+        ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
+        ctx.options |= 0x4
+        s.mount("https://", CustomHttpAdapter(ctx))
+
+        s.cookies.set(
+            "COLLECTIONDATES", self._uprn, domain="onlineforms.bradford.gov.uk"
+        )
+        r = s.get(f"{URL}/collectiondates.eb")
+
+        soup = BeautifulSoup(r.text, features="html.parser")
+        # Each bin service is rendered as a <table role="region"> whose
+        # CSS class list contains an entry ending in "-Override-Panel".
+        div = soup.find_all("table", {"role": "region"})
+        for region in div:
+            displayClass = list(
+                filter(lambda x: x.endswith("-Override-Panel"), region["class"])
+            )
+            if len(displayClass) > 0:
+                # The matching "-Override-Header" cell names the service.
+                heading = region.find_all(
+                    "td", {"class": displayClass[0].replace("Panel", "Header")}
+                )
+                # NOTE(review): "type" shadows the builtin; also, a heading
+                # matching none of the branches leaves type="UNKNOWN" and
+                # ICONS["UNKNOWN"] below would raise KeyError — confirm
+                # whether such panels can occur.
+                type = "UNKNOWN"
+                if "General" in heading[0].text:
+                    type = "REFUSE"
+                elif "Recycling" in heading[0].text:
+                    type = "RECYCLING"
+                elif "Garden" in heading[0].text:
+                    type = "GARDEN"
+                lines = region.find_all("div", {"type": "text"})
+                for entry in lines:
+                    try:
+                        entries.append(
+                            Collection(
+                                date=datetime.strptime(
+                                    entry.text.strip(), "%a %b %d %Y"
+                                ).date(),
+                                t=type,
+                                icon=ICONS[type],
+                            )
+                        )
+                    except ValueError:
+                        pass  # ignore date conversion failure for not scheduled collections
+
+        return entries

+ 127 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/brisbane_qld_gov_au.py

@@ -0,0 +1,127 @@
+import json
+from datetime import date, timedelta
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Brisbane City Council"
+DESCRIPTION = "Source for Brisbane City Council rubbish collection."
+URL = "https://www.brisbane.qld.gov.au/clean-and-green/rubbish-tips-and-bins/rubbish-collections/bin-collection-calendar"
+TEST_CASES = {
+    "Suburban Social": {
+        "suburb": "Chapel Hill",
+        "street_name": "Moordale St",
+        "street_number": "3",
+    },
+    "The Scratch Bar": {
+        "suburb": "Milton",
+        "street_name": "Park Rd",
+        "street_number": "8/1",
+    },
+    "Green Beacon": {
+        "suburb": "Teneriffe",
+        "street_name": "Helen St",
+        "street_number": "26",
+    },
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0"}
+
+
+class Source:
+    def __init__(self, suburb, street_name, street_number):
+        self.suburb = suburb
+        self.street_name = street_name
+        self.street_number = street_number
+
+    def fetch(self):
+
+        suburb_id = 0
+        street_id = 0
+        property_id = 0
+        today = date.today()
+        nextmonth = today + timedelta(30)
+
+        # Retrieve suburbs
+        r = requests.get(
+            "https://brisbane.waste-info.com.au/api/v1/localities.json", headers=HEADERS
+        )
+        data = json.loads(r.text)
+
+        # Find the ID for our suburb
+        for item in data["localities"]:
+            if item["name"] == self.suburb:
+                suburb_id = item["id"]
+                break
+
+        if suburb_id == 0:
+            return []
+
+        # Retrieve the streets in our suburb
+        r = requests.get(
+            f"https://brisbane.waste-info.com.au/api/v1/streets.json?locality={suburb_id}",
+            headers=HEADERS,
+        )
+        data = json.loads(r.text)
+
+        # Find the ID for our street
+        for item in data["streets"]:
+            if item["name"] == self.street_name:
+                street_id = item["id"]
+                break
+
+        if street_id == 0:
+            return []
+
+        # Retrieve the properties in our street
+        r = requests.get(
+            f"https://brisbane.waste-info.com.au/api/v1/properties.json?street={street_id}",
+            headers=HEADERS,
+        )
+        data = json.loads(r.text)
+
+        # Find the ID for our property
+        for item in data["properties"]:
+            if item["name"] == f"{self.street_number} {self.street_name} {self.suburb}":
+                property_id = item["id"]
+                break
+
+        if property_id == 0:
+            return []
+
+        # Retrieve the upcoming collections for our property
+        r = requests.get(
+            f"https://brisbane.waste-info.com.au/api/v1/properties/{property_id}.json?start={today}&end={nextmonth}",
+            headers=HEADERS,
+        )
+
+        data = json.loads(r.text)
+
+        entries = []
+
+        for item in data:
+            if "start" in item:
+                collection_date = date.fromisoformat(item["start"])
+                if (collection_date - today).days >= 0:
+                    # Only consider recycle and organic events
+                    if item["event_type"] in ["recycle","organic"]:
+                        # Every collection day includes rubbish
+                        entries.append(
+                            Collection(
+                                date=collection_date, t="Rubbish", icon="mdi:trash-can"
+                            )
+                        )
+                        if item["event_type"] == "recycle":
+                            entries.append(
+                                Collection(
+                                    date=collection_date, t="Recycling", icon="mdi:recycle"
+                                )
+                            )
+                        if item["event_type"] == "organic":
+                            entries.append(
+                                Collection(
+                                    date=collection_date, t="Garden", icon="mdi:leaf"
+                                )
+                            )
+
+        return entries

+ 97 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py

@@ -0,0 +1,97 @@
+import urllib.parse
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "BSR"
+DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
+URL = "https://bsr.de"
+TEST_CASES = {
+    "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)": {
+        "abf_strasse": "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)",
+        "abf_hausnr": 1,
+    },
+    "Am Ried, 13467 Berlin (Reinickendorf)": {
+        "abf_strasse": "Am Ried, 13467 Berlin (Reinickendorf)",
+        "abf_hausnr": "11G",
+    },
+}
+
+
+def myquote(s):
+    """Percent-encode *s* the way the BSR site expects: "," and "()"
+    are left unescaped (non-standard compared to urllib's defaults)."""
+    # bsr uses strange quoting
+    return urllib.parse.quote(s, safe=",()")
+
+
+class Source:
+    """Scrape the BSR (Berliner Stadtreinigung) pickup calendar via iCal."""
+
+    def __init__(self, abf_strasse, abf_hausnr):
+        self._abf_strasse = abf_strasse
+        self._abf_hausnr = abf_hausnr
+        self._ics = ICS()
+
+    def fetch(self):
+        """Prime the site's search state, then download and parse the
+        iCal export for the configured street and house number."""
+        # get cookie
+        r = requests.get("https://www.bsr.de/abfuhrkalender-20520.php")
+        cookies = r.cookies
+
+        # get street name only (without PLZ)
+        street = self._abf_strasse.split(",")[0]
+
+        # start search using string name (without PLZ)
+        # (responses of these two calls are unused; they appear to set up
+        # server-side search state for the download below — verify)
+        args = {"script": "dynamic_search", "step": 1, "q": street}
+        r = requests.get(
+            "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
+        )
+
+        # retrieve house number list
+        args = {"script": "dynamic_search", "step": 2, "q": self._abf_strasse}
+        r = requests.get(
+            "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
+        )
+
+        # Request the full-year calendar with all waste fractions enabled.
+        args = {
+            "abf_strasse": street,
+            "abf_hausnr": self._abf_hausnr,
+            "tab_control": "Jahr",
+            "abf_config_weihnachtsbaeume": "",
+            "abf_config_restmuell": "on",
+            "abf_config_biogut": "on",
+            "abf_config_wertstoffe": "on",
+            "abf_config_laubtonne": "on",
+            # "abf_selectmonth": "5 2020",
+            # "abf_datepicker": "28.04.2020",
+            # "listitems":7,
+        }
+        r = requests.post(
+            "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax",
+            data=args,
+            cookies=cookies,
+        )
+
+        args = {
+            "script": "dynamic_iCal_ajax",
+            "abf_strasse": self._abf_strasse,
+            "abf_hausnr": self._abf_hausnr,
+            "tab_control": "Jahr",
+            "abf_config_weihnachtsbaeume": "",
+            "abf_config_restmuell": "on",
+            "abf_config_biogut": "on",
+            "abf_config_wertstoffe": "on",
+            "abf_config_laubtonne": "on",
+            # "abf_selectmonth": "5 2020",
+            # "listitems":7,
+        }
+
+        # create url using private url encoding
+        encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
+        url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
+        r = requests.get(url, cookies=cookies)
+
+        # parse ics file
+        dates = self._ics.convert(r.text)
+
+        entries = []
+        for d in dates:
+            entries.append(Collection(d[0], d[1]))
+        return entries

+ 52 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py

@@ -0,0 +1,52 @@
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "C-Trace.de"
+DESCRIPTION = "Source for C-Trace.de."
+URL = "https://c-trace.de/"
+TEST_CASES = {"Bremen": {"ort": "Bremen", "strasse": "Abbentorstraße", "hausnummer": 5}}
+
+
+BASE_URL = "https://web.c-trace.de"
+SERVICE_MAP = {"Bremen": "bremenabfallkalender"}
+
+
+class Source:
+    def __init__(self, ort, strasse, hausnummer):
+        self._ort = ort
+        self._strasse = strasse
+        self._hausnummer = hausnummer
+        self._ics = ICS(regex=r"Abfuhr: (.*)")
+
+    def fetch(self):
+        service = SERVICE_MAP.get(self._ort)
+        if service is None:
+            raise Exception(f"no service for {self._ort}")
+
+        session = requests.session()
+
+        # get session url
+        r = session.get(f"{BASE_URL}/{service}/Abfallkalender", allow_redirects=False,)
+        session_id = r.headers["location"].split("/")[
+            2
+        ]  # session_id like "(S(r3bme50igdgsp2lstgxxhvs2))"
+
+        args = {
+            "Gemeinde": self._ort,
+            "Strasse": self._strasse,
+            "Hausnr": self._hausnummer,
+        }
+        r = session.get(
+            f"{BASE_URL}/{service}/{session_id}/abfallkalender/cal", params=args
+        )
+        r.raise_for_status()
+
+        # parse ics file
+        r.encoding = "utf-8"
+        dates = self._ics.convert(r.text)
+
+        entries = []
+        for d in dates:
+            entries.append(Collection(d[0], d[1]))
+        return entries

+ 70 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/cambridge_gov_uk.py

@@ -0,0 +1,70 @@
+import logging
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Cambridge.gov.uk"
+DESCRIPTION = (
+    "Source for cambridge.gov.uk services for Cambridge and part of Cambridgeshire"
+)
+URL = "cambridge.gov.uk"
+TEST_CASES = {
+    "houseNumber": {"post_code": "CB13JD", "number": 37},
+    "houseName": {"post_code": "cb215hd", "number": "ROSEMARY HOUSE"},
+}
+
+API_URLS = {
+    "address_search": "https://servicelayer3c.azure-api.net/wastecalendar/address/search/",
+    "collection": "https://servicelayer3c.azure-api.net/wastecalendar/collection/search/{}/",
+}
+
+ICONS = {
+    "DOMESTIC": "mdi:trash-can",
+    "RECYCLE": "mdi:recycle",
+    "ORGANIC": "mdi:leaf",
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Source:
+    def __init__(self, post_code: str, number: str):
+        self._post_code = post_code
+        self._number = str(number).capitalize()
+
+    def fetch(self):
+        # fetch location id
+        r = requests.get(
+            API_URLS["address_search"], params={"postCode": self._post_code}
+        )
+        r.raise_for_status()
+        addresses = r.json()
+
+        address_ids = [
+            x["id"] for x in addresses if x["houseNumber"].capitalize() == self._number
+        ]
+
+        if len(address_ids) == 0:
+            raise Exception(f"Could not find address {self._post_code} {self._number}")
+
+        q = str(API_URLS["collection"]).format(address_ids[0])
+        r = requests.get(q)
+        r.raise_for_status()
+
+        collections = r.json()["collections"]
+        entries = []
+
+        for collection in collections:
+            for round_type in collection["roundTypes"]:
+                entries.append(
+                    Collection(
+                        date=datetime.strptime(
+                            collection["date"], "%Y-%m-%dT%H:%M:%SZ"
+                        ).date(),
+                        t=round_type.title(),
+                        icon=ICONS.get(round_type),
+                    )
+                )
+
+        return entries

+ 111 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py

@@ -0,0 +1,111 @@
+import datetime
+import json
+
+import requests
+from bs4 import BeautifulSoup
+from requests.utils import requote_uri
+from waste_collection_schedule import Collection
+
+TITLE = "Campbelltown City Council"
+DESCRIPTION = "Source for Campbelltown City Council rubbish collection."
+URL = "https://www.campbelltown.nsw.gov.au/"
+TEST_CASES = {
+    "Minto Mall": {
+        "post_code": "2566",
+        "suburb": "Minto",
+        "street_name": "Brookfield Road",
+        "street_number": "10",
+    },
+    "Campbelltown Catholic Club": {
+        "post_code": "2560",
+        "suburb": "Campbelltown",
+        "street_name": "Camden Road",
+        "street_number": "20-22",
+    },
+    "Australia Post Ingleburn": {
+        "post_code": "2565",
+        "suburb": "INGLEBURN",
+        "street_name": "Oxford Road",
+        "street_number": "34",
+    },
+}
+
+API_URLS = {
+    "address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
+    "collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0"}
+
+ICON_MAP = {
+    "General Waste": "trash-can",
+    "Recycling": "mdi:recycle",
+    "Green Waste": "mdi:leaf",
+}
+
+
+class Source:
+    """Scrape kerbside collection dates from Campbelltown City Council."""
+
+    def __init__(
+        self, post_code: str, suburb: str, street_name: str, street_number: str
+    ):
+        self.post_code = post_code
+        self.suburb = suburb
+        self.street_name = street_name
+        self.street_number = street_number
+
+    def fetch(self):
+        """Resolve the address to a geolocation id, then parse the
+        waste-services HTML fragment returned by the council API."""
+        locationId = 0
+
+        address = "{} {} {} NSW {}".format(
+            self.street_number, self.street_name, self.suburb, self.post_code
+        )
+
+        q = requote_uri(str(API_URLS["address_search"]).format(address))
+
+        # Retrieve suburbs
+        r = requests.get(q, headers=HEADERS)
+
+        data = json.loads(r.text)
+
+        # Take the first matching location id, if any.
+        for item in data["locations"]:
+            locationId = item["Id"]
+            break
+
+        if locationId == 0:
+            return []
+
+        # Retrieve the upcoming collections for our property
+        q = requote_uri(str(API_URLS["collection"]).format(locationId))
+
+        r = requests.get(q, headers=HEADERS)
+
+        data = json.loads(r.text)
+
+        # The payload wraps an HTML fragment in "responseContent".
+        responseContent = data["responseContent"]
+
+        soup = BeautifulSoup(responseContent, "html.parser")
+        services = soup.find_all("div", attrs={"class": "service-details"})
+
+        entries = []
+
+        for item in services:
+            # test if <div> contains a valid date. If not, it is not a collection item.
+            date_text = item.find("span")
+            try:
+                date = datetime.datetime.strptime(date_text.text, "%A%d %b %Y").date()
+
+            except ValueError:
+                continue
+
+            waste_type = item.contents[0].strip()
+
+            entries.append(
+                Collection(
+                    date=date,
+                    t=waste_type,
+                    icon=ICON_MAP.get(waste_type, "mdi:trash-can"),
+                )
+            )
+
+        return entries

+ 84 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/canterbury_gov_uk.py

@@ -0,0 +1,84 @@
+import logging
+from datetime import datetime
+import json
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "canterbury.gov.uk"
+DESCRIPTION = (
+    "Source for canterbury.gov.uk services for canterbury"
+)
+URL = "canterbury.gov.uk"
+TEST_CASES = {
+    "houseNumber": {"post_code": "ct68ru", "number": "63"},
+    "houseName": {"post_code": "ct68ru", "number": "KOWLOON"},
+}
+
+API_URLS = {
+    "address_search": "https://trsewmllv7.execute-api.eu-west-2.amazonaws.com/dev/address",
+    "collection":  "https://zbr7r13ke2.execute-api.eu-west-2.amazonaws.com/Beta/get-bin-dates",
+}
+
+ICONS = {
+    "General": "mdi:trash-can",
+    "Recycling": "mdi:recycle",
+    "Food": "mdi:food-apple",
+    "Garden": "mdi:shovel",
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Source:
+    def __init__(self, post_code: str, number: str):
+        self._post_code = post_code
+        self._number = str(number).capitalize()
+
+    def fetch(self):
+        # fetch location id
+        r = requests.get(
+            API_URLS["address_search"], params={
+                "postcode": self._post_code, "type": "standard"}
+        )
+        r.raise_for_status()
+        addresses = r.json()
+
+        address_ids = [
+            x for x in addresses["candidates"]
+            if x["attributes"]["PAO_TEXT"].lower() == self._number.lower() or x["attributes"]["PAO_START_NUMBER"].lower() == self._number.lower()
+        ]
+
+        if len(address_ids) == 0:
+            raise Exception(
+                f"Could not find address {self._post_code} {self._number}")
+
+        q = str(API_URLS["collection"])
+        r = requests.post(q, json={
+                          "uprn": address_ids[0]["attributes"]["UPRN"], "usrn": address_ids[0]["attributes"]["USRN"]})
+        r.raise_for_status()
+
+        collectionsRaw = json.loads(r.json()["dates"])
+        collections = {
+            "General": collectionsRaw["blackBinDay"],
+            "Recycling": collectionsRaw["recyclingBinDay"],
+            "Food": collectionsRaw["foodBinDay"],
+            "Garden": collectionsRaw["gardenBinDay"],
+        }
+        entries = []
+
+        for collection in collections:
+            if len(collections[collection]) <= 0:
+                continue
+            for date in collections[collection]:
+                entries.append(
+                    Collection(
+                        date=datetime.strptime(
+                            date, "%Y-%m-%dT%H:%M:%S"
+                        ).date(),
+                        t=collection,
+                        icon=ICONS.get(collection),
+                    )
+                )
+
+        return entries

+ 58 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py

@@ -0,0 +1,58 @@
+import datetime
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Christchurch City Council"
+DESCRIPTION = "Source for Christchurch City Council."
+URL = "https://ccc.govt.nz/services/rubbish-and-recycling/collections"
+TEST_CASES = {"53 Hereford Street": {"address": "53 Hereford Street"}}
+
+
class Source:
    """Source for Christchurch City Council kerbside collections."""

    def __init__(self, address):
        # Free-text address, e.g. "53 Hereford Street".
        self._address = address

    def fetch(self):
        entries = []

        # Find the Rating Unit ID by the physical address.
        # While a property may have more than one address, bins are allocated
        # per Rating Unit.
        address_query = {
            "q": self._address,
            "status": "current",
            "crs": "epsg:4326",
            "limit": 1,
        }
        r = requests.get(
            "https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest",
            params=address_query,
        )
        # Fail early with a clear HTTP error instead of a confusing JSON/index
        # error further down (the original had no status check).
        r.raise_for_status()
        address = r.json()
        if not address:
            raise Exception(f"Could not find address: {self._address}")

        # Find the bin service by Rating Unit ID.
        bins_headers = {
            "client_id": "69f433c880c74c349b0128e9fa1b6a93",
            "client_secret": "139F3D2A83E34AdF98c80566f2eb7212"
        }
        r = requests.get(
            "https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/"
            + str(address[0]["RatingUnitID"]),
            headers=bins_headers,
        )
        r.raise_for_status()
        bins = r.json()

        # Deduplicate the bins (keyed on material) in case the Rating Unit has
        # more than one of the same bin type.
        unique_bins = {
            each["material"]: each for each in bins["bins"]["collections"]
        }.values()

        # Process each bin ("collection_bin" avoids shadowing the builtin bin()).
        for collection_bin in unique_bins:
            entries.append(
                Collection(
                    datetime.datetime.strptime(
                        collection_bin["next_planned_date_app"], "%Y-%m-%d"
                    ).date(),
                    collection_bin["material"],
                )
            )

        return entries

+ 79 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/cheshire_east_gov_uk.py

@@ -0,0 +1,79 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "cheshireeast.gov.uk"
+DESCRIPTION = "Source for cheshireeast.gov.uk services for Cheshire East"
+URL = "cheshireeast.gov.uk"
+
+
+TEST_CASES = {
+    "houseUPRN": {"uprn": "100010132071"},
+    "houseAddress": {"postcode": "WA16 0AY", "name_number": "1"},
+}
+
+ICON_MAP = {
+    "General Waste": "mdi:trash-can",
+    "Mixed Recycling": "mdi:recycle",
+    "Garden Waste": "mdi:leaf",
+}
+
+
class Source:
    """Source for Cheshire East Council bin collections.

    Accepts either a UPRN directly, or a postcode plus house name/number
    which is used to look the UPRN up first.
    """

    def __init__(self, uprn=None, postcode=None, name_number=None):
        self._uprn = uprn
        self._postcode = postcode
        self._name_number = name_number

    def fetch(self):
        session = requests.Session()

        if self._postcode and self._name_number:
            # Look up postcode and house name/number to get the UPRN.
            params = {
                "postcode": self._postcode,
                "propertyname": self._name_number,
            }
            r = session.get(
                "https://online.cheshireeast.gov.uk/MyCollectionDay/SearchByAjax/Search",
                params=params,
            )
            r.raise_for_status()
            soup = BeautifulSoup(r.text, features="html.parser")
            link = soup.find("a", attrs={"class": "get-job-details"})
            # NOTE: removed leftover debug print(link) from the original.
            if link is None:
                raise Exception("address not found")
            self._uprn = link["data-uprn"]

        if self._uprn is None:
            raise Exception("uprn not set")

        params = {"uprn": self._uprn}
        r = session.get(
            "https://online.cheshireeast.gov.uk/MyCollectionDay/SearchByAjax/GetBartecJobList",
            params=params,
        )
        r.raise_for_status()

        soup = BeautifulSoup(r.text, features="html.parser")
        cells = soup.find_all("td", attrs={"class": "visible-cell"})

        entries = []

        for cell in cells:
            labels = cell.find_all("label")
            if labels:
                # The second label holds the collection date, the third the
                # job description (e.g. "Empty Standard General Waste").
                date = datetime.strptime(labels[1].text, "%d/%m/%Y").date()
                waste_type = labels[2].text.removeprefix("Empty Standard ")
                entries.append(
                    Collection(
                        date=date,
                        t=waste_type,
                        icon=ICON_MAP.get(waste_type),
                    )
                )

        return entries

+ 115 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/chesterfield_gov_uk.py

@@ -0,0 +1,115 @@
+import json
+import logging
+import requests
+
+from datetime import datetime
+from waste_collection_schedule import Collection
+
+# With verify=True the POST fails due to a SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines are used to suppress the InsecureRequestWarning when using verify=False
+import urllib3
+urllib3.disable_warnings()
+
+
+TITLE = "chesterfield.gov.uk"
+
+DESCRIPTION = (
+    "Source for waste collection services for Chesterfield Borough Council"
+)
+
+URL = "https://www.chesterfield.gov.uk/"
+
+HEADERS = {
+    "user-agent": "Mozilla/5.0",
+}
+
+TEST_CASES = {
+    "Test_001": {"uprn": 74023685},
+    "Test_002": {"uprn": "74009625"},
+    "Test_003": {"uprn": "74035689"},
+    "Test_004": {"uprn": "74020930"},
+}
+
+ICONS = {
+    "DOMESTIC REFUSE": "mdi:trash-can",
+    "DOMESTIC RECYCLING": "mdi:recycle",
+    "DOMESTIC ORGANIC": "mdi:leaf",
+}
+
+APIS = {
+    "session": "https://www.chesterfield.gov.uk/bins-and-recycling/bin-collections/check-bin-collections.aspx",
+    "fwuid": "https://myaccount.chesterfield.gov.uk/anonymous/c/cbc_VE_CollectionDaysLO.app?aura.format=JSON&aura.formatAdapter=LIGHTNING_OUT",
+    "search": "https://myaccount.chesterfield.gov.uk/anonymous/aura?r=2&aura.ApexAction.execute=1",
+}
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Chesterfield Borough Council bin collections.

    The council exposes its schedule through a Salesforce Aura/Lightning
    endpoint, which requires a framework UID ("fwuid") captured from the
    anonymous app descriptor before the actual query can be made.
    """

    def __init__(self, uprn=None):
        # Keep the UPRN as a string, but let None stay None so fetch() can
        # report a missing configuration.  The original did str(uprn)
        # unconditionally, turning None into the truthy string "None" and
        # leaving `data` undefined (NameError) when no UPRN was configured.
        self._uprn = str(uprn) if uprn is not None else None

    def fetch(self):
        if not self._uprn:
            raise Exception("uprn must be provided in the source configuration")

        s = requests.Session()
        # Prime the session on the council's public page first.
        r = s.get(
            APIS["session"],
            headers=HEADERS,
        )

        # Capture the fwuid value required by the Aura endpoint.
        r = s.get(
            APIS["fwuid"],
            verify=False,  # see SSL note at the top of this module
            headers=HEADERS,
        )
        resp = json.loads(r.content)
        fwuid = resp["auraConfig"]["context"]["fwuid"]

        # POST request returns the schedule for the matching UPRN.
        payload = {
            "message": '{"actions":[{"id":"4;a","descriptor":"aura://ApexActionController/ACTION$execute","callingDescriptor":"UNKNOWN","params":{"namespace":"","classname":"CBC_VE_CollectionDays","method":"getServicesByUPRN","params":{"propertyUprn":"'
            + self._uprn
            + '","executedFrom":"Main Website"},"cacheable":false,"isContinuation":false}}]}',
            "aura.context": '{"mode":"PROD","fwuid":"'
            + fwuid
            + '","app":"c:cbc_VE_CollectionDaysLO","loaded":{"APPLICATION@markup://c:cbc_VE_CollectionDaysLO":"pqeNg7kPWCbx1pO8sIjdLA"},"dn":[],"globals":{},"uad":true}',
            "aura.pageURI": "/bins-and-recycling/bin-collections/check-bin-collections.aspx",
            "aura.token": "null",
        }
        r = s.post(
            APIS["search"],
            data=payload,
            verify=False,
            headers=HEADERS,
        )
        data = json.loads(r.content)

        entries = []

        # Extract waste types and dates from the returned JSON.
        for item in data["actions"][0]["returnValue"]["returnValue"]["serviceUnits"]:
            try:
                waste_type = item["serviceTasks"][0]["taskTypeName"]
            except IndexError:
                # Commercial collection schedule for residential properties is
                # empty, generating an IndexError; skip those service units.
                pass
            else:
                waste_type = str(waste_type).replace("Collect ", "")
                dt_zulu = item["serviceTasks"][0]["serviceTaskSchedules"][0]["nextInstance"]["currentScheduledDate"]
                dt_utc = datetime.strptime(dt_zulu, "%Y-%m-%dT%H:%M:%S.%f%z")
                # Convert the UTC timestamp to local time before taking the date.
                dt_local = dt_utc.astimezone(None)
                entries.append(
                    Collection(
                        date=dt_local.date(),
                        t=waste_type,
                        icon=ICONS.get(waste_type.upper()),
                    )
                )

        return entries

+ 69 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/cochem_zell_online_de.py

@@ -0,0 +1,69 @@
+import contextlib
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfall Cochem-Zell"
+DESCRIPTION = "Source for waste collection in district Cochem-Zell."
+URL = "https://www.cochem-zell-online.de/abfallkalender/"
+TEST_CASES = {
+    "Alf": {"district": "Alf"},
+    "Bullay": {"district": "Bullay"},
+    "Zell-Stadt": {"district": "Zell-Stadt"},
+    "Pünderich": {"district": "Pünderich"},
+}
+
+API_URL = "https://abfallkalender10.app.moba.de/Cochem_Zell/api"
+REMINDER_DAY = 0  # The calendar event should be on the same day as the waste collection
+REMINDER_HOUR = 6  # The calendar event should start on any hour of the correct day, so this does not matter much
+FILENAME = "Abfallkalender.ics"
+ICON_MAP = {
+    "Biotonne": "mdi:leaf",
+    "Gruengut": "mdi:forest",
+    "Papierabfall": "mdi:package-variant",
+    "Restmülltonne": "mdi:trash-can",
+    "Umweltmobil": "mdi:truck",
+    "Verpackungsabfall": "mdi:recycle",
+}
+
+
class Source:
    """Fetch the Cochem-Zell ICS waste calendar for a district."""

    def __init__(self, district: str):
        self._district = district
        self._ics = ICS()

    def fetch(self):
        today = datetime.now()
        entries = self._fetch_year(today.year)

        # In December, additionally try next year's calendar; suppress any
        # failure in case it has not been published yet.
        if today.month == 12:
            with contextlib.suppress(Exception):
                entries += self._fetch_year(today.year + 1)

        return entries

    def _fetch_year(self, year: int):
        # URL layout: <api>/<district>/<year>/<reminder day>/<reminder hour>/<file>
        parts = (API_URL, self._district, year, REMINDER_DAY, REMINDER_HOUR, FILENAME)
        url = "/".join(str(part) for part in parts)

        response = requests.get(url)
        schedule = self._ics.convert(response.text)

        collections = []
        for entry in schedule:
            collections.append(
                Collection(
                    date=entry[0],
                    t=entry[1],
                    icon=ICON_MAP.get(entry[1], "mdi:trash-can"),
                )
            )
        return collections

+ 82 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/colchester_gov_uk.py

@@ -0,0 +1,82 @@
+import json
+from datetime import datetime, timedelta
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Colchester.gov.uk"
+DESCRIPTION = "Source for Colchester.gov.uk services for the borough of Colchester, UK."
+URL = "https://colchester.gov.uk"
+TEST_CASES = {
+    # "High Street, Colchester": {"llpgid": "1197e725-3c27-e711-80fa-5065f38b5681"},  # Should be 0
+    "Church Road, Colchester": {"llpgid": "30213e07-6027-e711-80fa-5065f38b56d1"},
+    "The Lane, Colchester": {"llpgid": "7cd96a3d-6027-e711-80fa-5065f38b56d1"},
+}
+
+ICONS = {
+    "Black bags": "mdi:trash-can",
+    "Glass": "mdi:glass-fragile",
+    "Cans": "mdi:trash-can",
+    "Textiles": "mdi:hanger",
+    "Paper/card": "mdi:recycle",
+    "Plastics": "mdi:recycle",
+    "Garden waste": "mdi:leaf",
+    "Food waste": "mdi:food",
+}
+
+
class Source:
    """Source for Colchester Borough Council collection calendars."""

    def __init__(self, llpgid):
        self._llpgid = llpgid

    def fetch(self):
        # Download the calendar JSON for this LLPG id.
        response = requests.get(
            f"https://new-llpg-app.azurewebsites.net/api/calendar/{self._llpgid}"
        )
        data = json.loads(response.text)

        entries = []

        for week in data["Weeks"]:
            for weekday_key, day_collections in week["Rows"].items():
                for day_collection in day_collections:
                    try:
                        # Colchester.gov.uk publish a 2-week cycle of 'Blue'
                        # and 'Green' weeks (traditionally non-recyclables and
                        # recyclables).  `DatesOfFirstCollectionDays` holds the
                        # first collection day of the cycle; each week carries a
                        # boolean `WeekOne`.  When `WeekOne` is false the week
                        # is the second ('Green') one, so 7 days must be added
                        # to the cycle start date.
                        cycle_date = datetime.strptime(
                            data["DatesOfFirstCollectionDays"][weekday_key],
                            "%Y-%m-%dT%H:%M:%S",
                        )
                        if not week["WeekOne"]:
                            cycle_date = cycle_date + timedelta(days=7)
                        if cycle_date > datetime.now():
                            entries.append(
                                Collection(
                                    date=cycle_date.date(),
                                    t=day_collection["Name"].title(),
                                    icon=ICONS[day_collection["Name"]],
                                )
                            )
                        # Only the current cycle is published, so extrapolate
                        # the following cycle 14 days out — the same method the
                        # website uses to display further collection weeks.
                        entries.append(
                            Collection(
                                date=cycle_date.date() + timedelta(days=14),
                                t=day_collection["Name"].title(),
                                icon=ICONS[day_collection["Name"]],
                            )
                        )
                    except ValueError:
                        # Ignore date conversion failures for collections that
                        # are not scheduled.
                        pass

        return entries

+ 64 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/cornwall_gov_uk.py

@@ -0,0 +1,64 @@
+from datetime import date, datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "Cornwall Council, UK"
+DESCRIPTION = "Source for cornwall.gov.uk services for Cornwall Council"
+URL = "cornwall.gov.uk"
+TEST_CASES = {
+    "known_uprn": {"uprn": "100040118005"},
+    "unknown_uprn": {"postcode": "TR261SP", "housenumberorname": "7"},
+}
+SEARCH_URLS = {
+    "uprn_search": "https://www.cornwall.gov.uk/my-area/",
+    "collection_search": "https://www.cornwall.gov.uk/umbraco/Surface/Waste/MyCollectionDays?subscribe=False",
+}
+COLLECTIONS = {"Rubbish", "Recycling"}
+
+
class Source:
    """Source for Cornwall Council collections, by UPRN or postcode lookup."""

    def __init__(self, uprn=None, postcode=None, housenumberorname=None):
        self._uprn = uprn
        self._postcode = postcode
        self._housenumberorname = housenumberorname

    def fetch(self):
        entries = []
        session = requests.Session()

        # Resolve the UPRN from the postcode and property name/number if it
        # was not supplied directly.
        if self._uprn is None:
            r = session.get(
                SEARCH_URLS["uprn_search"], params={"Postcode": self._postcode}
            )
            r.raise_for_status()
            soup = BeautifulSoup(r.text, features="html.parser")
            for option in soup.find(id="Uprn").find_all("option"):
                if option.text.startswith(self._housenumberorname):
                    self._uprn = option["value"]

        # Get the collection days based on the UPRN (either supplied through
        # arguments or found above).
        if self._uprn is not None:
            r = session.get(
                SEARCH_URLS["collection_search"], params={"uprn": self._uprn}
            )
            r.raise_for_status()
            soup = BeautifulSoup(r.text, features="html.parser")
            for collection in COLLECTIONS:
                # The page shows only day and month; assume the current year.
                day_text = soup.find(id=collection.lower()).find_all("span")[-1].text
                full_date = day_text + " " + str(date.today().year)

                entries.append(
                    Collection(
                        datetime.strptime(full_date, "%d %b %Y").date(),
                        collection,
                    )
                )

        return entries

+ 34 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py

@@ -0,0 +1,34 @@
+import logging
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "UMWELTPROFIS"
+DESCRIPTION = "Source for Umweltprofis"
+URL = "https://www.umweltprofis.at"
+TEST_CASES = {
+    "Ebensee": {"url": "https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Fetch and parse a pre-generated Umweltprofis ICS calendar URL."""

    def __init__(self, url):
        self._url = url
        self._ics = ICS()

    def fetch(self):
        response = requests.get(self._url)
        if response.status_code != 200:
            _LOGGER.error("Error querying calendar data")
            return []

        # Work around a malformed REFRESH-INTERVAL property in the feed.
        cleaned = response.text.replace(
            "REFRESH - INTERVAL; VALUE = ", "REFRESH-INTERVAL;VALUE="
        )

        return [Collection(item[0], item[1]) for item in self._ics.convert(cleaned)]

+ 89 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/derby_gov_uk.py

@@ -0,0 +1,89 @@
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+from bs4 import BeautifulSoup
+from urllib.parse import urlsplit, parse_qs
+import logging
+
+TITLE = "Derby.gov.uk"
+DESCRIPTION = "Source for Derby.gov.uk services for Derby City Council, UK."
+URL = "https://secure.derby.gov.uk/binday/"
+TEST_CASES = {
+    # Derby City council wants specific addresses, hopefully these are generic enough.
+    "Community Of The Holy Name, Morley Road, Derby, DE21 4TB": {
+        "premises_id": "100030339868"
+    },
+    "6 Wilsthorpe Road, Derby, DE21 4QR": {"post_code": "DE21 4QR", "house_number": 6},
+    "Allestree Home Improvements, 512 Duffield Road, Derby, DE22 2DL": {
+        "premises_id": "100030310335"
+    },
+}
+
+ICONS = {
+    "Black bin": "mdi:trash-can",
+    "Blue bin": "mdi:recycle",
+    "Brown bin": "mdi:leaf",
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Derby City Council bin collections.

    Accepts either a premises id directly, or a postcode plus house
    name/number that is resolved (and then cached) to a premises id.
    """

    def __init__(
        self, premises_id: int = None, post_code: str = None, house_number: str = None
    ):
        self._premises_id = premises_id
        self._post_code = post_code
        self._house_number = house_number
        if not any([self._premises_id, self._post_code and self._house_number]):
            _LOGGER.error(
                "premises_id or post_code and house number must be provided in config"
            )
        self._session = requests.Session()

    def fetch(self):
        entries = []

        # Fix: the original created self._session but then used bare
        # requests.get here; use the session consistently so cookies are
        # reused across requests.
        if self._premises_id is not None:
            r = self._session.get(
                "https://secure.derby.gov.uk/binday/Binday",
                params={
                    "PremisesId": self._premises_id,
                },
            )
        else:
            # The street search endpoint redirects to the schedule page, so by
            # caching the premises_id from the redirect URL we save an extra
            # request on every future check.
            r = self._session.get(
                "https://secure.derby.gov.uk/binday/StreetSearch",
                params={
                    "StreetNamePostcode": self._post_code,
                    "BuildingNameNumber": self._house_number,
                },
            )
            query = urlsplit(r.url).query
            params = parse_qs(query)
            self._premises_id = params["PremisesId"].pop()

        soup = BeautifulSoup(r.text, features="html.parser")
        results = soup.find_all("div", {"class": "binresult"})

        for result in results:
            date = result.find("strong")
            try:
                date = datetime.strptime(date.text, "%A, %d %B %Y:").date()
            except ValueError:
                _LOGGER.error(f"Skipped {date} as it does not match time format")
                continue
            img_tag = result.find("img")
            # The bin colour/type is carried in the image's alt text.
            collection_type = img_tag["alt"]
            entries.append(
                Collection(
                    date=date,
                    t=collection_type,
                    icon=ICONS[collection_type],
                )
            )
        return entries

+ 68 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/ecoharmonogram_pl.py

@@ -0,0 +1,68 @@
+import datetime
+from ..collection import Collection
+
+from ..service.EcoHarmonogramPL import Ecoharmonogram
+
+DESCRIPTION = "Source for ecoharmonogram.pl"
+URL = "ecoharmonogram.pl"
+TEST_CASES = {
+    "Simple test case": {"town": "Krzeszowice", "street": "Wyki", "house_number": ""},
+    "Sides multi test case": {"town": "Częstochowa", "street": "Boczna", "additional_sides_matcher": "wie"},
+    "Sides test case": {"town": "Częstochowa", "street": "Azaliowa", "house_number": "1",
+                        "additional_sides_matcher": "jedn"}
+}
+TITLE = "ecoharmonogram.pl"
+
+
class Source:
    """Source for ecoharmonogram.pl waste schedules.

    Bug fix: the original re-created the ``entries`` list for every matching
    street, so with several matching streets (and no side matcher) only the
    last street's collections were returned.  Entries are now accumulated
    across all matching streets.
    """

    def __init__(self, town, street="", house_number="", additional_sides_matcher=""):
        self.town_input = town
        self.street_input = street
        self.house_number_input = house_number
        self.additional_sides_matcher_input = additional_sides_matcher

    def fetch(self):
        town_data = Ecoharmonogram.fetch_town()
        matching_towns = filter(
            lambda x: self.town_input.lower() in x.get("name").lower(),
            town_data.get("towns"),
        )
        # Take the first town whose name contains the configured town.
        town = list(matching_towns)[0]

        schedule_periods_data = Ecoharmonogram.fetch_scheduled_periods(town)
        schedule_periods = schedule_periods_data.get("schedulePeriods")

        entries = []
        for sp in schedule_periods:
            streets = Ecoharmonogram.fetch_streets(
                sp, town, self.street_input, self.house_number_input
            )
            for street in streets:
                # Only streets whose "sides" text contains the configured
                # matcher are relevant (an empty matcher matches everything).
                if (
                    self.additional_sides_matcher_input.lower()
                    not in street.get("sides").lower()
                ):
                    continue

                schedules_response = Ecoharmonogram.fetch_schedules(sp, street)
                entries.extend(self._parse_schedules(schedules_response))

                if self.additional_sides_matcher_input != "":
                    # With an explicit side matcher, only the first matching
                    # street is relevant.
                    return entries

        return entries

    @staticmethod
    def _parse_schedules(schedules_response):
        """Join schedule rows with their descriptions and expand each row's
        day list into individual Collection entries."""
        descriptions_by_id = {
            sd.get("id"): sd for sd in schedules_response.get("scheduleDescription")
        }

        collections = []
        for sr in schedules_response.get("schedules"):
            name = descriptions_by_id.get(sr.get("scheduleDescriptionId")).get("name")
            year = int(sr.get("year"))
            month = int(sr.get("month"))
            # "days" is a semicolon-separated list of days of that month.
            for day in sr.get("days").split(";"):
                collections.append(
                    Collection(datetime.date(year, month, int(day)), name)
                )
        return collections

+ 91 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/egn_abfallkalender_de.py

@@ -0,0 +1,91 @@
+import datetime
+import logging
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "EGN Abfallkalender"
+DESCRIPTION = "Source for EGN Abfallkalender"
+URL = "https://www.egn-abfallkalender.de/kalender"
+TEST_CASES = {
+    "Grevenbroich": {
+        "city": "Grevenbroich",
+        "district": "Noithausen",
+        "street": "Von-Immelhausen-Straße",
+        "housenumber": 12,
+    },
+    "Dormagen": {
+        "city": "Dormagen",
+        "district": "Hackenbroich",
+        "street": "Aggerstraße",
+        "housenumber": 2,
+    },
+    "Grefrath": {
+        "city": "Grefrath",
+        "district": "Grefrath",
+        "street": "An Haus Bruch",
+        "housenumber": 18,
+    },
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+IconMap = {
+    "Grau": "mdi:trash-can",
+    "Gelb": "mdi:sack",
+    "Blau": "mdi:package-variant",
+    "Braun": "mdi:leaf",
+}
+
+
class Source:
    """Query the EGN Abfallkalender (CSRF-protected form POST) and flatten the
    nested year/month/day response into Collection entries."""

    def __init__(self, city, district, street, housenumber):
        self._city = city
        self._district = district
        self._street = street
        self._housenumber = housenumber

    def fetch(self):
        session = requests.session()

        # Load the page once to obtain the CSRF token from its meta tag.
        response = session.get(URL)
        soup = BeautifulSoup(response.text, features="html.parser")
        token_tag = soup.find("meta", {"name": "csrf-token"})
        if token_tag is None:
            return []

        response = session.post(
            URL,
            data={
                "city": self._city,
                "district": self._district,
                "street": self._street,
                "street_number": self._housenumber,
            },
            headers={"x-csrf-token": token_tag["content"]},
        )

        data = response.json()

        if data.get("error"):
            for waste_type, message in data["errors"].items():
                _LOGGER.error(f"{waste_type} - {message}")
            return []

        # waste_discharge is nested as {year: {month: {day: [types...]}}}.
        entries = []
        for year, months in data["waste_discharge"].items():
            for month, days in months.items():
                for day, types in days.items():
                    collection_date = datetime.date(int(year), int(month), int(day))
                    for waste_type in types:
                        # Map the type id to its bin colour name, falling back
                        # to the raw type, then capitalise for the icon lookup.
                        color = (
                            data["trash_type_colors"]
                            .get(str(waste_type).lower(), waste_type)
                            .capitalize()
                        )
                        entries.append(
                            Collection(
                                date=collection_date, t=color, icon=IconMap.get(color)
                            )
                        )

        return entries

+ 124 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/elmbridge_gov_uk.py

@@ -0,0 +1,124 @@
+import logging
+import requests
+
+from bs4 import BeautifulSoup
+from datetime import datetime, timedelta
+from waste_collection_schedule import Collection
+
+TITLE = 'elmbridge.gov.uk'
+DESCRIPTION = (
+    'Source for waste collection services for Elmbridge Borough Council'
+)
+URL = 'https://www.elmbridge.gov.uk/waste-and-recycling/'
+
+
+HEADERS = {
+    "user-agent": "Mozilla/5.0",
+}
+
+TEST_CASES = {
+    "Test_001" : {"uprn": 10013119164},
+    "Test_002": {"uprn": "100061309206"},
+    "Test_003": {"uprn": 100062119825},
+    "Test_004": {"uprn": "100061343923"},
+    "Test_005": {"uprn": 100062372553},
+}
+
+API_URLS = {
+    'session': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx',
+    'search': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx?action=SetAddress&UniqueId={}',
+    'schedule': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx?tab=0#Refuse_&_Recycling',
+}
+
+OFFSETS = {
+    'Monday': 0,
+    'Tuesday': 1,
+    'Wednesday': 2,
+    'Thursday': 3,
+    'Friday': 4,
+    'Saturday': 5,
+    'Sunday': 6,
+}
+
+ICONS = {
+    "REFUSE": "mdi:trash-can",
+    "RECYCLING": "mdi:recycle",
+    "FOOD": "mdi:food",
+    "GARDEN": "mdi:leaf",
+}
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Elmbridge Borough Council bin collections."""

    def __init__(self, uprn: str = None):
        self._uprn = str(uprn)

    def fetch(self):
        # The API does not return the year, nor the exact date of each
        # collection: it returns week-commencing dates plus the day of the
        # week each waste type is collected on.  We assume week-commencing
        # dates belong to the current year; around the turn of the year,
        # upcoming January collections would then be assigned dates in the
        # past, so:
        #   - a date less than 1 month in the past is accepted as a recently
        #     occurred collection;
        #   - a date more than 1 month in the past is assumed to be an
        #     incorrectly assigned date, and the year is incremented.
        # Finally, the week-commencing date is offset by the weekday each
        # waste type is scheduled on (parsed from the notices text).

        # Current date at midnight, consistent with the API result format.
        today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        year = today.year

        s = requests.Session()

        r0 = s.get(API_URLS['session'], headers=HEADERS)
        r0.raise_for_status()
        r1 = s.get(API_URLS['search'].format(self._uprn), headers=HEADERS)
        r1.raise_for_status()
        r2 = s.get(API_URLS['schedule'], headers=HEADERS)
        r2.raise_for_status()

        soup = BeautifulSoup(r2.content, 'html.parser')

        entries = []

        notice = soup.find('div', {'class': 'atPanelContent atFirst atAlt0'})
        notices = notice.text.replace('\nRefuse and recycling collection days\n', '').split('.')
        notices.pop(-1)  # remove superfluous trailing element
        frame = soup.find('div', {'class': 'atPanelContent atAlt1 atLast'})
        table = frame.find('table')

        for tr in table.find_all('tr'):
            row = [td.text.strip() for td in tr.find_all('td')]
            row.pop(1)  # removes superfluous element
            dt = datetime.strptime(row[0] + ' ' + str(year), '%d %b %Y')

            # Amend the year if needed (see note above).  Fix: the original
            # used `dt += timedelta(year=1)`, which raises TypeError because
            # timedelta has no `year` argument; advance via replace() instead.
            if (dt - today) < timedelta(days=-31):
                dt = dt.replace(year=dt.year + 1)
            row[0] = dt

            # Separate out same-day waste collections ("REFUSE + FOOD").
            wastetypes = row[1].split(' + ')

            # Apply the weekday offset for each collection type.
            for waste in wastetypes:
                for day, offset in OFFSETS.items():
                    for sentence in notices:
                        if (waste in sentence) and (day in sentence):
                            collection_date = row[0] + timedelta(days=offset)
                            entries.append(
                                Collection(
                                    date=collection_date.date(),
                                    t=waste + ' bin',
                                    icon=ICONS.get(waste.upper()),
                                )
                            )

        return entries

+ 105 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/environmentfirst_co_uk.py

@@ -0,0 +1,105 @@
+import logging
+import requests
+
+from bs4 import BeautifulSoup
+from dateutil.parser import parse
+from waste_collection_schedule import Collection
+
+TITLE = "environmentfirst.co.uk"
+
+DESCRIPTION = (
+    """Consolidated source for waste collection services from:
+        Eastbourne Borough Council 
+        Lewes District Council
+        """
+)
+
+URL = "https://environmentfirst.co.uk"
+
+TEST_CASES = {
+    "houseUPRN" : {"uprn": "100060063421"},
+    "houseNumber": {"post_code": "BN228SG", "number": 3},
+    "houseName": {"post_code": "BN73LG", "number": "Garden Cottage"},
+}
+
+ICONS = {
+    "RUBBISH": "mdi:trash-can",
+    "RECYCLING": "mdi:recycle",
+    "GARDEN WASTE": "mdi:leaf",
+}
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Scrapes collection schedules from www.environmentfirst.co.uk."""

    def __init__(self, post_code=None, number=None, name=None, uprn=None):
        """Args:
            post_code: postcode used for address-based searches.
            number: house number (int or str) for postcode + number lookups.
            name: house name for postcode + name lookups.
            uprn: Unique Property Reference Number for direct lookups.
        """
        self._uprn = uprn
        self._post_code = post_code
        # BUG FIX: keep None as None.  The original used str(number)
        # unconditionally, turning None into the truthy string "None" and
        # making the postcode + name branch in fetch() unreachable.
        self._number = str(number) if number is not None else None
        self._name = name

    def fetch(self):
        """Return upcoming Collection entries for the configured address.

        Raises:
            Exception: if no usable combination of arguments was supplied.
        """

        s = requests.Session()

        if self._uprn:
            # GET request returns schedule for matching uprn
            r = s.get(f"https://www.environmentfirst.co.uk/house.php?uprn={self._uprn}")
            responseContent = r.text

        elif self._post_code and self._number:
            # POST request returns schedule for matching address
            payload = {
                "property_no": self._number,
                "property_name": "",
                "street": "",
                "postcode": self._post_code
            }
            r = s.post("https://www.environmentfirst.co.uk/results.php", data=payload)
            responseContent = r.text

        elif self._post_code and self._name:
            # POST request returns list of postcode addresses
            payload = {
                "property_no": "",
                "property_name": self._name,
                "street": "",
                "postcode": self._post_code
            }
            r = s.post("https://www.environmentfirst.co.uk/results.php", data=payload)
            responseContent = r.text

            # Loop through postcode address list to find house name and uprn
            soup = BeautifulSoup(responseContent, "html.parser")
            table = soup.find('table')
            for row in table.find_all('tr')[1:]:
                if self._name in row.text:
                    for item in row('a', href=True):
                        self._uprn = str.split(item.get('href'), "=")[1]

            # GET request returns schedule for matching uprn
            r = s.get(f"https://www.environmentfirst.co.uk/house.php?uprn={self._uprn}")
            responseContent = r.text

        else:
            raise Exception("Address not found")

        entries = []

        # Extract waste types and dates from responseContent
        soup = BeautifulSoup(responseContent, "html.parser")
        paragraphs = soup.findAll("p")
        # First/last <p> hold the address and a marketing message; skip them.
        for p in paragraphs[1:-1]:
            if " day " not in p.text:
                continue
            for round_type in ICONS:
                if round_type in p.text.upper():
                    entries.append(
                        Collection(
                            # .date() for consistency with the other sources,
                            # which all emit datetime.date objects.
                            date=parse(str.split(p.text, ":")[1]).date(),
                            t=round_type,
                            icon=ICONS.get(round_type),
                        )
                    )

        return entries

+ 33 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/example.py

@@ -0,0 +1,33 @@
+import datetime
+
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Example Source"
+DESCRIPTION = "Source for example waste collection."
+URL = None
+TEST_CASES = {"Example": {"days": 10}}
+
+
class Source:
    """Generates a synthetic schedule, useful for demos and testing."""

    def __init__(self, days=20, per_day=2, types=5):
        self._days = days        # number of consecutive collection days
        self._per_day = per_day  # collections generated per day
        self._types = types      # number of distinct waste-type labels

    def fetch(self):
        """Return days * per_day fake collections, starting a week from today."""
        start = datetime.datetime.now().date()

        entries = []
        counter = 0
        for offset in range(self._days):
            collection_day = start + datetime.timedelta(days=offset + 7)
            for _ in range(self._per_day):
                label = f"Type{(counter % self._types) + 1}"
                entries.append(Collection(collection_day, label))
                counter += 1

        return entries

+ 74 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py

@@ -0,0 +1,74 @@
+import logging
+import requests
+
+from bs4 import BeautifulSoup
+from datetime import datetime
+from waste_collection_schedule import Collection
+
+TITLE = "fccenvironment.co.uk"
+
+DESCRIPTION = (
+    """Consolidated source for waste collection services for ~60 local authorities.
+        Currently supports:
+        Market Harborough
+        """
+)
+
+URL = "https://fccenvironment.co.uk"
+
+TEST_CASES = {
+    "Test_001" : {"uprn": "100030491624"},
+    "Test_002": {"uprn": "100030491614"},
+    "Test_003": {"uprn": "100030493289"},
+    "Test_004": {"uprn": "200001136341"}
+}
+
+
+ICONS = {
+    "NON-RECYCLABLE WASTE BIN COLLECTION": "mdi:trash-can",
+    "RECYCLING COLLECTION": "mdi:recycle",
+    "GARDEN WASTE COLLECTION": "mdi:leaf",
+}
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Scrapes the FCC Environment (Market Harborough) collection page."""

    def __init__(self, uprn=None):
        # UPRN (Unique Property Reference Number) identifying the property.
        self._uprn = uprn

    def fetch(self):
        """Return upcoming Collection entries for the configured UPRN.

        Raises:
            Exception: if no UPRN was configured.
            requests.HTTPError: if the council website returns an error.
        """
        # BUG FIX: fail fast with a clear message.  The original only
        # assigned responseContent inside `if self._uprn:` and then used it
        # unconditionally, raising NameError when no UPRN was supplied.
        if not self._uprn:
            raise Exception("Error: no uprn configured")

        s = requests.Session()

        # POST request returns schedule for matching uprn
        payload = {"Uprn": self._uprn}
        r = s.post(
            "https://www.fccenvironment.co.uk/harborough/detail-address",
            data=payload,
        )
        r.raise_for_status()
        responseContent = r.text

        entries = []
        # Extract waste types and dates from responseContent
        soup = BeautifulSoup(responseContent, "html.parser")
        services = soup.find(
            "div",
            attrs={"class": "blocks block-your-next-scheduled-bin-collection-days"},
        )
        items = services.find_all("li")
        for item in items:
            date_text = item.find("span", attrs={"class": "pull-right"}).text.strip()
            try:
                date = datetime.strptime(date_text, "%d %B %Y").date()
            except ValueError:
                # Items whose right-hand span is not a parseable date
                # carry no schedule information; skip them.
                continue
            else:
                waste_type = item.text.split(' (')[0]
                entries.append(
                    Collection(
                        date=date,
                        t=waste_type,
                        icon=ICONS.get(waste_type.upper()),
                    )
                )

        return entries

File diff suppressed because it is too large
+ 83 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py


+ 53 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/huntingdonshire_gov_uk.py

@@ -0,0 +1,53 @@
+import json
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Huntingdonshire.gov.uk"
+DESCRIPTION = "Source for Huntingdonshire.gov.uk services for Huntingdonshire District Council."
+URL = "https://www.huntingdonshire.gov.uk"
+TEST_CASES = {
+    "Wells Close, Brampton": {"uprn": "100090123510"},
+    "Inkerman Rise, St. Neots": {"uprn": "10000144271"},
+}
+
+ICONS = {
+    "Refuse": "mdi:trash-can",
+    "Recycling": "mdi:recycle",
+    "Garden": "mdi:leaf",
+}
+
+
class Source:
    """Queries the shared Azure waste-calendar API for Huntingdonshire (HDC)."""

    def __init__(self, uprn):
        # UPRN (Unique Property Reference Number) of the property.
        self._uprn = uprn

    def fetch(self):
        """Return the next (up to 20) collections for the configured UPRN.

        Raises:
            requests.HTTPError: if the API returns an error status.
        """
        r = requests.get(
            f"https://servicelayer3c.azure-api.net/wastecalendar/collection/search/{self._uprn}?authority=HDC&take=20"
        )
        # Surface HTTP errors instead of failing later on malformed JSON.
        r.raise_for_status()

        # Cleaned up: the original parsed r.text with json.loads into an
        # unused variable and initialised `entries` twice.
        entries = []

        for collection in r.json()["collections"]:
            # One calendar entry may cover several rounds on the same date.
            collection_date = datetime.strptime(
                collection["date"], "%Y-%m-%dT%H:%M:%SZ"
            ).date()
            for round_type in collection["roundTypes"]:
                entries.append(
                    Collection(
                        date=collection_date,
                        t=round_type.title(),
                        icon=ICONS.get(round_type),
                    )
                )

        return entries

+ 69 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/hvcgroep_nl.py

@@ -0,0 +1,69 @@
+import json
+import logging
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "HVCGroep"
+DESCRIPTION = "Source for the Dutch HVCGroep waste management."
+URL = "https://www.hvcgroep.nl/zelf-regelen/afvalkalender"
+TEST_CASES = {"Tollebeek": {"postal_code": "8309AV", "house_number": "1"}}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Fetches pickup dates from the HVCGroep inzamelkalender REST API."""

    def __init__(self, postal_code, house_number):
        self.postal_code = postal_code
        self.house_number = house_number
        # Maps the API's icon identifiers onto Material Design icons.
        self.icons = {
            "plastic-blik-drinkpak": "mdi:recycle",
            "gft": "mdi:leaf",
            "papier-en-karton": "mdi:archive",
            "restafval": "mdi:trash-can",
        }

    def fetch(self):
        """Resolve the address to a BAG id, then return its pickup dates."""
        # Look up the BAG id (unique address identifier) for this address.
        lookup = requests.get(
            f"https://inzamelkalender.hvcgroep.nl/adressen/{self.postal_code}:{self.house_number}"
        )
        addresses = json.loads(lookup.text)

        # Unknown address — log and bail out before doing the extra requests.
        if not addresses:
            _LOGGER.error("no data found for this address")
            return []

        bag_id = addresses[0]["bagid"]

        # Details of the waste streams (paper, plastic, ...) for the address.
        flows_response = requests.get(
            f"https://inzamelkalender.hvcgroep.nl/rest/adressen/{bag_id}/afvalstromen"
        )
        waste_flows = json.loads(flows_response.text)

        # Upcoming pickup dates for the address.
        pickups_response = requests.get(
            f"https://inzamelkalender.hvcgroep.nl/rest/adressen/{bag_id}/ophaaldata"
        )
        pickups = json.loads(pickups_response.text)

        entries = []
        for pickup in pickups:
            matching = [
                flow for flow in waste_flows if flow["id"] == pickup["afvalstroom_id"]
            ]
            entries.append(
                Collection(
                    date=datetime.strptime(pickup["ophaaldatum"], "%Y-%m-%d").date(),
                    t=matching[0]["title"],
                    icon=self.icons.get(matching[0]["icon"], "mdi:trash-can"),
                )
            )

        return entries

+ 60 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/hygea_be.py

@@ -0,0 +1,60 @@
+import datetime
+import json
+import time
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Hygea"
+DESCRIPTION = "Source for Hygea garbage collection"
+URL = "https://www.hygea.be/"
+TEST_CASES = {
+    "Soignies": {"streetIndex": "3758"},
+    "Frameries": {"streetIndex": "4203"},
+    "Erquelinnes": {"cp": "6560"},
+}
+
+WASTE_MAP = {
+    "om": {"type": "Ordures ménagères", "icon": "mdi:trash-can"},
+    "pmc": {"type": "PMC", "icon": "mdi:recycle"},
+    "sacvert": {"type": "Déchets Organiques", "icon": "mdi:trash-can"},
+    "fourth": {"type": "Papier & cartons", "icon": "mdi:leaf"},
+}
+
+
class Source:
    """Queries the Hygea display-calendar endpoints for upcoming pickups."""

    def __init__(self, streetIndex=None, cp=None):
        # Either a street index or a postal code ("cp") must be provided.
        self._street_index = streetIndex
        self._cp = cp

    def fetch(self):
        """Return the collections for the next 31 days.

        Raises:
            Exception: if neither streetIndex nor cp was configured.
        """
        # BUG FIX: fail fast.  Without this guard the original fell through
        # both branches and hit an unbound `response` variable (NameError)
        # when both arguments were None.
        if self._street_index is None and self._cp is None:
            raise Exception("Error: either streetIndex or cp must be configured")

        # Query window: now .. now + 31 days (2678400 seconds).
        params = {"start": int(time.time()), "end": int(time.time() + 2678400)}
        if self._street_index is not None:
            params["street"] = self._street_index
            response = requests.get(
                "https://www.hygea.be/displaycal.html", params=params
            )
        else:
            # The web-service endpoint also takes its value via "street".
            params["street"] = self._cp
            response = requests.get(
                "https://www.hygea.be/displaycalws.html", params=params
            )

        if not response.ok:
            return []
        data = json.loads(response.text)

        entries = []
        for day in data:
            date = datetime.datetime.strptime(
                day["start"], "%Y-%m-%dT%H:%M:%S%z"
            ).date()

            # example for day["className"]: 12  notadded pos136 om multi
            waste_types = set(day["className"].split())
            # `info` instead of the original `map`, which shadowed a builtin.
            for abbr, info in WASTE_MAP.items():
                if abbr in waste_types:
                    entries.append(
                        Collection(date=date, t=info["type"], icon=info["icon"])
                    )

        return entries

File diff suppressed because it is too large
+ 221 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py


+ 117 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py

@@ -0,0 +1,117 @@
+import json
+from datetime import date, timedelta
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Inner West Council (NSW)"
+DESCRIPTION = "Source for Inner West Council (NSW) rubbish collection."
+URL = "https://www.innerwest.nsw.gov.au/live/waste-and-recycling/bins-and-clean-ups/waste-calendar"
+TEST_CASES = {
+    "Random address": {
+        "suburb": "Tempe",
+        "street_name": "Princes Highway",
+        "street_number": "810",
+    }
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0"}
+
+
class Source:
    """Resolves an Inner West address via the waste-info API and lists pickups."""

    def __init__(self, suburb, street_name, street_number):
        self.suburb = suburb
        self.street_name = street_name
        self.street_number = street_number

    @staticmethod
    def _find_id(items, name):
        """Return the "id" of the first record whose "name" equals *name*, or 0.

        Factored out of fetch(), which repeated this loop three times
        (localities, streets, properties).
        """
        for item in items:
            if item["name"] == name:
                return item["id"]
        return 0

    def fetch(self):
        """Resolve suburb -> street -> property ids, then list collections.

        Returns [] as soon as any lookup step fails to match.
        """
        today = date.today()
        nextmonth = today + timedelta(30)

        # Retrieve suburbs and find the ID for ours
        r = requests.get(
            "https://marrickville.waste-info.com.au/api/v1/localities.json",
            headers=HEADERS,
        )
        data = json.loads(r.text)
        suburb_id = self._find_id(data["localities"], self.suburb)
        if suburb_id == 0:
            return []

        # Retrieve the streets in our suburb and find the ID for ours
        r = requests.get(
            f"https://marrickville.waste-info.com.au/api/v1/streets.json?locality={suburb_id}",
            headers=HEADERS,
        )
        data = json.loads(r.text)
        street_id = self._find_id(data["streets"], self.street_name)
        if street_id == 0:
            return []

        # Retrieve the properties in our street and find the ID for ours
        r = requests.get(
            f"https://marrickville.waste-info.com.au/api/v1/properties.json?street={street_id}",
            headers=HEADERS,
        )
        data = json.loads(r.text)
        property_id = self._find_id(
            data["properties"],
            f"{self.street_number} {self.street_name} {self.suburb}",
        )
        if property_id == 0:
            return []

        # Retrieve the upcoming collections for our property
        r = requests.get(
            f"https://marrickville.waste-info.com.au/api/v1/properties/{property_id}.json?start={today}&end={nextmonth}",
            headers=HEADERS,
        )
        data = json.loads(r.text)

        entries = []
        for item in data:
            if "start" not in item:
                continue
            collection_date = date.fromisoformat(item["start"])
            if (collection_date - today).days < 0:
                continue
            # Only consider recycle and organic events
            if item["event_type"] not in ("recycle", "organic"):
                continue
            # Every collection day includes rubbish
            entries.append(
                Collection(date=collection_date, t="Rubbish", icon="mdi:trash-can")
            )
            if item["event_type"] == "recycle":
                entries.append(
                    Collection(date=collection_date, t="Recycling", icon="mdi:recycle")
                )
            else:
                entries.append(
                    Collection(date=collection_date, t="Garden", icon="mdi:leaf")
                )

        return entries

+ 118 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/ipswich_qld_gov_au.py

@@ -0,0 +1,118 @@
+import datetime
+import urllib
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Ipswich City Council"
+DESCRIPTION = "Source for Ipswich City Council rubbish collection."
+URL = "https://www.ipswich.qld.gov.au/live/waste-and-recycling/bin-collection-calendar"
+TEST_CASES = {
+    "Camira State School": {"street": "184-202 Old Logan Rd", "suburb": "Camira"},
+    "Random": {"street": "50 Brisbane Road", "suburb": "Redbank"},
+}
+
+
+ICONS = {
+    "Waste Bin": "mdi:trash-can",
+    "Recycle Bin": "mdi:recycle",
+    "FOGO Bin": "mdi:leaf",
+}
+
+
def toDate(dateStr: str) -> datetime.date:
    """Parse a "yyyy-MM-dd" string (the dateFormat this file requests from
    the API) into a datetime.date.

    BUG FIX: the original indexed items[1..3], i.e. it assumed an extra
    leading field before the year and raised IndexError for plain
    year-month-day strings; the parser below uses items[0..2] for the same
    format.
    """
    items = dateStr.split("-")
    return datetime.date(int(items[0]), int(items[1]), int(items[2]))
+
+
class IpswichGovAuParser(HTMLParser):
    """Extracts Collection entries from the WhatBinDay agenda HTML.

    The markup is a list of <li class="WBD-result-item"> elements; each one
    carries a <span class="WBD-event-date"> directly under the <li> and, at
    span nesting depth 3, <span class="WBD-bin-text"> elements naming the
    bins collected on that date.
    """

    def __init__(self):
        super().__init__()
        self._entries = []        # accumulated Collection objects
        self._state = None        # NOTE(review): assigned but never read
        self._level = 0           # NOTE(review): assigned but never read
        self._class = ""          # NOTE(review): assigned but never read
        self._li_level = 0        # current <li> nesting depth
        self._li_valid = False    # are we inside a WBD-result-item <li>?
        self._span_level = 0      # current <span> nesting depth
        self._load_date = False   # next data chunk is the event date
        self._load_bin = False    # next data chunk is a bin name
        self._loaded_date = None  # date of the <li> currently being parsed

    @property
    def entries(self):
        """Collections parsed so far."""
        return self._entries

    def handle_endtag(self, tag):
        # Track nesting; leaving an <li> invalidates its remembered date.

        if tag == "li":
            self._li_level -= 1
            self._loaded_date = None

        if tag == "span":
            self._span_level -= 1

    def handle_starttag(self, tag, attrs):

        d = dict(attrs)
        cls = d.get("class", "")

        if tag == "li":
            self._li_level += 1
            # Only top-level result items carry schedule data.
            if self._li_level == 1 and cls == "WBD-result-item":
                self._li_valid = True
            else:
                self._li_valid = False
                self._loaded_date = None

        if tag == "span":
            self._span_level += 1
            # The date span sits directly under the <li>.
            if self._li_valid and self._span_level == 1 and cls == "WBD-event-date":
                self._load_date = True

            # Bin-name spans are nested three span levels deep.
            if self._li_valid and self._span_level == 3 and cls == "WBD-bin-text":
                self._load_bin = True

    def handle_data(self, data):
        # Only text inside a valid result item is interesting.
        if not self._li_valid:
            return

        if self._load_date:
            self._load_date = False

            # Dates arrive as "yyyy-MM-dd" (the dateFormat requested from
            # the API by Source.fetch below).
            items = data.strip().split("-")
            self._loaded_date = datetime.date(
                int(items[0]), int(items[1]), int(items[2])
            )

        if self._load_bin:
            self._load_bin = False

            self._entries.append(
                Collection(
                    self._loaded_date, data, icon=ICONS.get(data, "mdi:trash-can")
                )
            )
+
+
class Source:
    """Queries the WhatBinDay agenda API for an Ipswich street address."""

    def __init__(self, street, suburb):
        self._street = street
        self._suburb = suburb

    def fetch(self):
        """Query the API and parse the returned agenda HTML into entries."""
        # BUG FIX: pass the raw address and let requests URL-encode the
        # query parameters.  The original quote_plus()-encoded the address
        # and appended already-encoded "+QLD%2C+Australia", which requests
        # then encoded a second time (e.g. "%2C" -> "%252C").
        params = {
            "apiKey": "b8dbca0c-ad9c-4f8a-8b9c-080fd435c5e7",
            "agendaResultLimit": "3",
            "dateFormat": "yyyy-MM-dd",  # must match the parser's date handling
            "displayFormat": "agenda",
            "address": f"{self._street}, {self._suburb} QLD, Australia",
        }

        r = requests.get("https://console.whatbinday.com/api/search", params=params)
        p = IpswichGovAuParser()
        p.feed(r.text)
        return p.entries

+ 43 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py

@@ -0,0 +1,43 @@
+import datetime
+import json
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Jumomind"
+DESCRIPTION = "Source for Jumomind.de waste collection."
+URL = "https://www.jumomind.de"
+TEST_CASES = {
+    "ZAW": {"service_id": "zaw", "city_id": 106, "area_id": 94},
+    "Bad Homburg, Bahnhofstrasse": {"service_id": "hom", "city_id": 1, "area_id": 411},
+    "Bad Buchau via MyMuell": {
+        "service_id": "mymuell",
+        "city_id": 3031,
+        "area_id": 3031,
+    },
+}
+
+
class Source:
    """Queries the jumomind "mmapp" API for a configured city and area."""

    def __init__(self, service_id, city_id, area_id):
        self._service_id = service_id
        self._city_id = city_id
        self._area_id = area_id

    def fetch(self):
        """Return every collection date published for the configured area."""
        query = {"r": "dates/0", "city_id": self._city_id, "area_id": self._area_id}

        # The API serves JSON from the provider-specific subdomain.
        response = requests.get(
            f"https://{self._service_id}.jumomind.com/mmapp/api.php", params=query
        )

        schedule = json.loads(response.text)
        return [
            Collection(datetime.date.fromisoformat(item["day"]), item["title"])
            for item in schedule
        ]

+ 62 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/kaev_niederlausitz.py

@@ -0,0 +1,62 @@
+import requests
+import html
+import json
+
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "KAEV Niederlausitz"
+DESCRIPTION = "Source for Kommunaler Abfallverband niederlausitz waste collection."
+URL = "https://www.kaev.de/"
+URL_ADDRESS = 'https://www.kaev.de/Templates/Content/DetailTourenplanWebsite/ajax.aspx/getAddress'
+TEST_CASES = {
+    "Luckau / OT Zieckau": {
+        "abf_suche": "Luckau / OT Zieckau",
+    },
+    "Luckau Bersteweg": {
+        "abf_suche": "Luckau / Bersteweg",
+    },
+    "Staakow": {
+        "abf_suche": "Staakow",
+    },
+}
+
def get_kalender_id(search):
    """Resolve a free-text search to KAEV address/calendar records.

    Visits the homepage first to establish the session cookies the
    endpoint expects, then posts the search query.  The endpoint wraps its
    JSON payload in a "d" field, which is decoded before returning.
    """
    session = requests.Session()
    session.get('https://www.kaev.de/')
    response = session.post(URL_ADDRESS, json={"query": search}).json()
    return json.loads(response["d"])
+
class Source:
    """Downloads and parses the KAEV iCal feed for a searched address."""

    def __init__(self, abf_suche):
        # Free-text search, e.g. "Luckau / OT Zieckau" or just "Staakow".
        self._abf_suche = abf_suche
        self._ics = ICS()

    def fetch(self):
        """Resolve the search to a calendar URL and parse its iCal feed.

        Raises:
            Exception: when the search yields no entry, or several entries
                for an already district-qualified ("Ort / Ortsteil") search.
        """
        abf_kalender = get_kalender_id(self._abf_suche)

        # Restructured from the original's loop-over-one-element branches
        # into explicit guards; the outcome per case is unchanged.
        if len(abf_kalender) == 0:
            raise Exception("Error: Keine Einträge gefunden")

        if len(abf_kalender) > 1:
            if "/" in self._abf_suche:
                # A district-qualified search should be unambiguous.
                raise Exception("Error: Mehrere Einträge gefunden")
            # Unqualified search: fall back to the first match, whose URL
            # carries no OrtsteilId (matching the original behaviour).
            abf_daten = abf_kalender[0]
            calurl = html.escape(
                "https://www.kaev.de/Templates/Content/DetailTourenplanWebsite/iCal.aspx?Ort="
                + abf_daten["name"]
                + "&OrtId="
                + str(abf_daten["ortId"])
            )
        else:
            # Exactly one match: URL includes the district (OrtsteilId).
            abf_daten = abf_kalender[0]
            calurl = html.escape(
                "https://www.kaev.de/Templates/Content/DetailTourenplanWebsite/iCal.aspx?Ort="
                + abf_daten["name"]
                + "&OrtId="
                + str(abf_daten["ortId"])
                + "&OrtsteilId="
                + str(abf_daten["ortsteilId"])
            )

        r = requests.get(calurl)
        r.encoding = "utf-8"

        dates = self._ics.convert(r.text)
        entries = []
        for d in dates:
            # The feed's summaries carry a trailing ", " separator.
            entries.append(Collection(d[0], d[1].removesuffix(", ")))
        return entries

+ 114 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py

@@ -0,0 +1,114 @@
+import datetime
+import json
+import requests
+
+from bs4 import BeautifulSoup
+from requests.utils import requote_uri
+from waste_collection_schedule import Collection
+
+TITLE = "Ku-ring-gai Council"
+DESCRIPTION = "Source for Ku-ring-gai Council waste collection."
+URL = "https://www.krg.nsw.gov.au"
+TEST_CASES = {
+    "randomHouse": {
+        "post_code": "2070",
+        "suburb": "LINDFIELD",
+        "street_name": "Wolseley Road",
+        "street_number": "42",
+    },
+    "randomAppartment": {
+        "post_code": "2074",
+        "suburb": "WARRAWEE",
+        "street_name": "Cherry Street",
+        "street_number": "4/9",
+    },
+    "randomMultiunit": {
+        "post_code": "2075",
+        "suburb": "ST IVES",
+        "street_name": "Kitchener Street",
+        "street_number": "99/2-8",
+    },
+}
+
+API_URLS = {
+    "session":"https://www.krg.nsw.gov.au" ,
+    "search": "https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}",
+    "schedule": "https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
+}
+
+HEADERS = {
+    "user-agent": "Mozilla/5.0",
+}
+
+ICON_MAP = {
+    "GeneralWaste": "mdi:trash-can",
+    "Recycling": "mdi:recycle",
+    "GreenWaste": "mdi:leaf",
+}
+
+ROUNDS = {
+    "GeneralWaste": "General Waste",
+    "Recycling": "Recycling",
+    "GreenWaste": "Green Waste",
+}
+
+# _LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Scrapes Ku-ring-gai Council's myarea waste-services endpoint."""

    def __init__(
        self, post_code: str, suburb: str, street_name: str, street_number: str
    ):
        self.post_code = post_code
        self.suburb = suburb.upper()  # search results use upper-case suburbs
        self.street_name = street_name
        self.street_number = street_number

    def fetch(self):
        """Return the next collection date for each waste service.

        Returns [] if the address cannot be resolved to a geolocation id.
        """

        locationId = 0

        # 'collection' api call seems to require an ASP.Net_sessionID, so obtain the relevant cookie
        s = requests.Session()
        q = requote_uri(str(API_URLS["session"]))
        r0 = s.get(q, headers=HEADERS)

        # Do initial address search
        address = "{} {}, {} NSW {}".format(
            self.street_number, self.street_name, self.suburb, self.post_code
        )
        q = requote_uri(str(API_URLS["search"]).format(address))
        r1 = s.get(q, headers=HEADERS)
        data = json.loads(r1.text)

        # Find the geolocation for the address.
        # BUG FIX: the original had `break` at loop level, so only the first
        # search result was ever inspected; stop at the first *match* instead.
        for item in data["Items"]:
            if address in item["AddressSingleLine"]:
                locationId = item["Id"]
                break

        if locationId == 0:
            return []

        # Retrieve the upcoming collections for location
        q = requote_uri(str(API_URLS["schedule"]).format(locationId))
        r2 = s.get(q, headers=HEADERS)
        data = json.loads(r2.text)
        responseContent = data["responseContent"]

        soup = BeautifulSoup(responseContent, "html.parser")
        services = soup.find_all("article")

        entries = []

        for item in services:
            waste_type = item.find("h3").text
            date = datetime.datetime.strptime(
                item.find("div", {"class": "next-service"}).text.strip(),
                "%a %d/%m/%Y",
            ).date()
            entries.append(
                Collection(
                    date=date,
                    # api returns GeneralWaste, Recycling, GreenWaste;
                    # ROUNDS maps them to user-friendly labels.
                    t=ROUNDS.get(waste_type),
                    icon=ICON_MAP.get(waste_type, "mdi:trash-can"),
                )
            )

        return entries

+ 52 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/kwb_goslar_de.py

@@ -0,0 +1,52 @@
+import requests
+from waste_collection_schedule import Collection
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "KreisWirtschaftsBetriebe Goslar"
+DESCRIPTION = "Source for kwb-goslar.de waste collection."
+URL = "https://www.kwb-goslar.de/Abfallwirtschaft/Abfuhr/"
+TEST_CASES = {
+    "Berliner Straße (Clausthal-Zellerfeld)": {"pois": "2523.602"},
+    "Braunschweiger Straße (Seesen)": {"pois": "2523.409"},
+}
+
+ICON_MAP = {
+    "Baum- und Strauchschnitt": "mdi:leaf",
+    "Biotonne": "mdi:bio",
+    "Blaue Tonne": "mdi:newspaper-variant-multiple",
+    "Gelber Sack": "mdi:recycle",
+    "Restmülltonne": "mdi:trash-can",
+    "Weihnachtsbäume": "mdi:pine-tree",
+}
+
+
class Source:
    """Downloads the KWB Goslar iCal feed for a point of interest ("pois")."""

    def __init__(self, pois):
        self.ics = ICS()
        self.pois = pois

    def fetch(self):
        """Fetch the iCal feed and convert it into Collection entries.

        Raises:
            Exception: if the download fails.
        """
        response = requests.get(
            url="https://www.kwb-goslar.de/output/options.php",
            params={
                "ModID": "48",
                "call": "ical",
                "pois": self.pois,
            },
            headers={
                "Referer": "https://www.kwb-goslar.de",
            },
        )

        if not response.ok:
            raise Exception(f"Error: failed to fetch url: {response.request.url}")

        return [
            Collection(
                date=when,
                t=waste_type,
                icon=ICON_MAP.get(waste_type, "mdi:trash-can-outline"),
            )
            for when, waste_type in self.ics.convert(response.text)
        ]

+ 55 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_rhoen_grabfeld.py

@@ -0,0 +1,55 @@
+import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Source for Rhön Grabfeld"
+DESCRIPTION = "Source for Rhönn Grabfeld uses service by offizium."
+URL = 'https://fs-api-rg.offizium.com/abfalltermine'
+ICON_MAP = {
+    "Restmüll/Gelber Sack/Biotonne": "mdi:trash-can",
+    "Papiersammlung": "mdi:package-variant",
+    "Problemmüllsammlung": "mdi:biohazard"
+}
+EVENT_BLACKLIST = ['Wertstoffhof Mellrichstadt',
+                   'Wertstoffhof Bad Königshofen', 'Wertstoffzentrum Bad Neustadt',
+                   'Wertstoffsammelstelle Ostheim',
+                   'Wertstoffsammelstelle Bischofsheim']
+TEST_CASES = {
+    "City only": {"city": "Ostheim"},
+    "City + District": {"city": "Ostheim", "district": "Oberwaldbehrungen"},
+    "District only": {"district": "Oberwaldbehrungen"},
+    "empty": {}
+}
+
+
class Source:
    """Queries the offizium API for Rhön-Grabfeld collection dates."""

    def __init__(self, city: str = None, district: str = None):
        # Both filters are optional; the API accepts either or neither.
        self._city = city
        self._district = district

    def fetch(self):
        """Return upcoming collections, skipping blacklisted drop-off events.

        Raises:
            requests.HTTPError: if the API returns an error status.
        """
        # Cleaned up: the original computed `now = datetime.datetime.now()
        # .date()` and never used it.
        r = requests.get(URL, params={
            "stadt": self._city,
            "ortsteil": self._district
        })

        r.raise_for_status()

        entries = []
        for event in r.json():
            # filter out Sammelstellen, Wertstoffhof and Wertstoffzentrum
            kind = event["muellart"]
            if kind in EVENT_BLACKLIST:
                continue
            entries.append(
                Collection(
                    date=datetime.datetime.fromisoformat(event["termin"]).date(),
                    t=kind,
                    icon=ICON_MAP.get(kind, "mdi:trash-can")
                )
            )

        return entries

+ 120 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_wittmund_de.py

@@ -0,0 +1,120 @@
+import requests
+import json
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+from bs4 import BeautifulSoup
+
+TITLE = "Landkreis-Wittmund.de"
+DESCRIPTION = "Source for Landkreis Wittmund waste collection."
+URL = "https://www.landkreis-wittmund.de/Leben-Wohnen/Wohnen/Abfall/Abfuhrkalender/"
+AUTOCOMPLETE_URL = "https://www.landkreis-wittmund.de/output/autocomplete.php?out=json&type=abto&mode=&select=2&refid={}&term="
+DOWNLOAD_URL = "https://www.landkreis-wittmund.de/output/options.php?ModID=48&call=ical&ArtID%5B0%5D=3105.1&ArtID%5B1%5D=1.4&ArtID%5B2%5D=1.2&ArtID%5B3%5D=1.3&ArtID%5B4%5D=1.1&pois={}&alarm=0"
+
+TEST_CASES = {
+    "CityWithoutStreet": {
+        "city": "Werdum",
+    },
+    "CityWithStreet": {
+        "city": "Werdum",
+        "street": "alle Straßen",
+    },
+}
+
class Source:
    """Source for Landkreis Wittmund waste collection.

    Resolves the numeric ids for city and street by scraping the public
    web form, then downloads the iCal calendar for that street.
    """

    def __init__(self, city, street=None):
        self._city = city
        self._street = street
        self._ics = ICS()

    def fetch(self):
        """Resolve ids and return the parsed collection entries."""
        cityId = self.fetch_city_id(self._city)
        streetId = self.fetch_street_id(cityId, self._street)

        return self.fetch_ics(DOWNLOAD_URL.format(streetId))

    def is_city_selection(self, tag, cityName):
        # A usable <option> carries a non-empty id in "value" and its
        # visible text must match the requested city exactly.
        # (Use the cityName argument instead of silently reading
        # self._city so the helper works for any lookup.)
        return tag['value'] != "" and tag.string == cityName

    def fetch_city_id(self, cityName):
        """Scrape the calendar page and return the id for *cityName*.

        Raises Exception if the page fails to load or the name does not
        resolve to exactly one <option> entry.
        """
        r = requests.get(URL)
        if not r.ok:
            raise Exception(
                "Error: failed to fetch url: {}".format(
                    URL
                )
            )

        soup = BeautifulSoup(r.text, 'html.parser')
        citySelection = [
            a
            for a in soup.select('#sf_locid > option[value]')
            if self.is_city_selection(a, cityName)
        ]
        if len(citySelection) == 0:
            raise Exception(
                "Error: could not find id for city: '{}'".format(
                    cityName
                )
            )

        if len(citySelection) > 1:
            raise Exception(
                "Error: non-unique match for city: '{}'".format(
                    cityName
                )
            )

        return citySelection[0]['value']

    def fetch_street_id(self, cityId, streetName):
        """Return the street id for *streetName* within city *cityId*.

        With streetName=None the city must have exactly one street.
        Raises Exception on fetch failure, no match, or ambiguous match.
        """
        r = requests.get(AUTOCOMPLETE_URL.format(cityId, streetName), headers={
            "Referer": URL
        })

        if not r.ok:
            raise Exception(
                "Error: failed to fetch url: {}".format(
                    AUTOCOMPLETE_URL.format(cityId, streetName)
                )
            )

        streets = json.loads(r.text)
        # Each autocomplete item is [id, label]; match by substring when a
        # street was configured, otherwise take every entry.
        if streetName is not None:
            streetId = [item[0] for item in streets if streetName in item[1]]
        else:
            streetId = [item[0] for item in streets]

        if len(streetId) == 0:
            raise Exception(
                "Error: could not find streets for city id / street: {}, '{}'".format(
                    cityId,
                    streetName
                )
            )

        if len(streetId) > 1:
            raise Exception(
                "Error: non-unique match for city id / street: {}, '{}'".format(
                    cityId,
                    streetName
                )
            )

        return streetId[0]

    def fetch_ics(self, url):
        """Download the ICS file at *url* and convert it to Collections."""
        r = requests.get(url, headers={
            "Referer": URL
        })

        if not r.ok:
            raise Exception(
                "Error: failed to fetch url: {}".format(
                    url
                )
            )

        # The server does not declare a charset; the feed is UTF-8.
        r.encoding = "utf-8"
        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries

+ 56 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/lerum_se.py

@@ -0,0 +1,56 @@
+# coding: utf-8
+from datetime import datetime
+import json
+from urllib.parse import urlencode
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Lerum Vatten och Avlopp"
+DESCRIPTION = "Source for Lerum Vatten och Avlopp waste collection."
+URL = "https://vatjanst.lerum.se/FutureWeb/SimpleWastePickup/SimpleWastePickup"
+TEST_CASES = {
+    "PRO": {"street_address": "Floda stationsväg 5, Floda"},
+    "Polisen": {"street_address": "Göteborgsvägen 16, Lerum"},
+}
+
+
class Source:
    """Source for Lerum Vatten och Avlopp waste collection.

    Resolves *street_address* to a building via the search endpoint,
    then fetches the pickup schedule for that building.
    """

    def __init__(self, street_address):
        self._street_address = street_address

    def fetch(self):
        """Return one Collection per waste type, or [] if the address
        cannot be resolved to a building."""
        response = requests.post(
            "https://vatjanst.lerum.se/FutureWeb/SimpleWastePickup/SearchAdress",
            {"searchText": self._street_address}
        )

        address_data = json.loads(response.text)
        address = None
        # .get() guards against missing keys; take the first building hit.
        if address_data.get("Succeeded") and address_data.get("Buildings"):
            address = address_data["Buildings"][0]

        if not address:
            return []

        query_params = urlencode({"address": address})
        response = requests.get(
            "https://vatjanst.lerum.se/FutureWeb/SimpleWastePickup/GetWastePickupSchedule?{}"
            .format(query_params)
        )
        data = json.loads(response.text)

        entries = []
        for item in data["RhServices"]:
            waste_type = item["WasteType"]
            # Food waste gets a leaf icon; everything else a generic bin.
            icon = "mdi:leaf" if waste_type == "Matavfall" else "mdi:trash-can"
            next_pickup_date = datetime.fromisoformat(item["NextWastePickup"]).date()
            entries.append(
                Collection(date=next_pickup_date, t=waste_type, icon=icon)
            )

        return entries

+ 58 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/lindau_ch.py

@@ -0,0 +1,58 @@
+import json
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Abfall Lindau"
+DESCRIPTION = "Source for Lindau waste collection."
+URL = "https://www.lindau.ch/abfalldaten"
+TEST_CASES = {
+    "Tagelswangen": {"city": "Tagelswangen"},
+    "Grafstal": {"city": "190"},
+}
+
+
+IconMap = {
+    "kehricht": "mdi:trash-can",
+    "grungut": "mdi:leaf",
+    "hackseldienst": "mdi:leaf",
+    "papier und karton": "mdi:package-variant",
+    "altmetalle": "mdi:nail",
+}
+
+
class Source:
    """Source for the Lindau (CH) waste collection calendar."""

    def __init__(self, city):
        # Either the district name or its numeric id as a string.
        self._city = city

    def fetch(self):
        """Scrape the public events table and return our city's entries."""
        page = requests.get("https://www.lindau.ch/abfalldaten")

        document = BeautifulSoup(page.text, "html.parser")

        # The full event list is embedded as JSON in the table's
        # "data-entities" attribute.
        event_table = document.find("table", attrs={"id": "icmsTable-abfallsammlung"})
        events = json.loads(event_table.attrs["data-entities"])

        entries = []
        for event in events["data"]:
            matches_city = (
                self._city in event["abfallkreisIds"]
                or self._city in event["abfallkreisNameList"]
            )
            if not matches_city:
                continue

            date_token = event["_anlassDate-sort"].split()[0]
            pickup_date = datetime.fromisoformat(date_token).date()

            # Event names contain HTML markup; reduce to plain text.
            label = BeautifulSoup(event["name"], "html.parser").text
            label_key = BeautifulSoup(event["name-sort"], "html.parser").text

            entries.append(
                Collection(
                    date=pickup_date,
                    t=label,
                    icon=IconMap.get(label_key, "mdi:trash-can"),
                )
            )

        return entries

+ 37 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/lrasha_de.py

@@ -0,0 +1,37 @@
+import datetime
+import requests
+from waste_collection_schedule import Collection
+from waste_collection_schedule.service.ICS import ICS
+
+
+TITLE = "Landkreis Schwäbisch Hall"
+DESCRIPTION = "Source for lrasha.de - Landkreis Schwäbisch Hall"
+URL = "http://exchange.cmcitymedia.de/landkreis-schwaebisch-hallt3/wasteCalendarExport.php?location="
+# https://www.lrasha.de/de/buergerservice/abfallwirtschaft/abfallkalender
+
+TEST_CASES = {
+    "Ilshofen": {"location": "114"}
+}
+
+HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
+
+
class Source:
    """Source for Landkreis Schwäbisch Hall (lrasha.de)."""

    def __init__(self, location):
        # Numeric location id used by the export endpoint.
        self._location = location
        self._ics = ICS()

    def fetch(self):
        """Download the ICS export for the location and convert it."""
        export_url = URL + str(self._location)
        response = requests.get(export_url, headers=HEADERS)
        response.raise_for_status()

        # The server omits the charset; the calendar is UTF-8 encoded.
        response.encoding = "utf-8"
        dates = self._ics.convert(response.text)

        return [Collection(item[0], item[1]) for item in dates]

+ 73 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/manchester_uk.py

@@ -0,0 +1,73 @@
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+from bs4 import BeautifulSoup
+from urllib.parse import urlsplit, parse_qs
+import logging
+
+TITLE = "manchester.gov.uk"
+DESCRIPTION = "Source for bin collection services for Manchester City Council, UK."
+URL = "https://www.manchester.gov.uk/bincollections/"
+TEST_CASES = {
+    "domestic": {'uprn': '000077065560'},
+}
+
+ICONS = {
+    "Black / Grey Bin": "mdi:trash-can",
+    "Blue Bin": "mdi:recycle",
+    "Brown Bin": "mdi:glass-fragile",
+    "Green Bin": "mdi:leaf",
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for Manchester City Council bin collections.

    uprn: Unique Property Reference Number as a string - the sample value
    '000077065560' has leading zeros, so an int would mangle it.
    """

    def __init__(
        self, uprn: str = None
    ):
        self._uprn = uprn
        if not self._uprn:
            _LOGGER.error(
                "uprn must be provided in config"
            )
        self._session = requests.Session()

    def fetch(self):
        entries = []

        # Use the session created in __init__ (it was previously unused).
        r = self._session.post(
            URL,
            data={
                "mcc_bin_dates_uprn": self._uprn,
                "mcc_bin_dates_submit": "Go"
            },
        )

        soup = BeautifulSoup(r.text, features="html.parser")
        results = soup.find_all("div", {"class": "collection"})

        for result in results:
            # The next date sits in the caption ("Next collection <date>"),
            # any further dates are <li> items.
            date = result.find("p", {"class": "caption"})
            dates = []
            dates.append(str(date.text).replace("Next collection ", "", 1))
            for date in result.find_all('li'):
                dates.append(date.text)
            img_tag = result.find("img")
            collection_type = img_tag["alt"]
            for current_date in dates:
                try:
                    date = datetime.strptime(current_date, "%A %d %b %Y").date()
                except ValueError:
                    # Non-date text occasionally appears; skip it.
                    _LOGGER.error(f"Skipped {current_date} as it does not match time format")
                    continue
                entries.append(
                    Collection(
                        date=date,
                        t=collection_type,
                        # .get avoids a KeyError on bin types not in ICONS.
                        icon=ICONS.get(collection_type, "mdi:trash-can"),
                    )
                )

        return entries

+ 83 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/melton_vic_gov_au.py

@@ -0,0 +1,83 @@
+import logging
+import re
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Melton City Council"
+DESCRIPTION = "Source for Melton City Council rubbish collection."
+URL = "https://www.melton.vic.gov.au/My-Area"
+TEST_CASES = {
+    "Tuesday A": {"street_address": "23 PILBARA AVENUE BURNSIDE 3023"},
+    "Tuesday B": {"street_address": "29 COROWA CRESCENT BURNSIDE 3023"},
+    "Wednesday A": {"street_address": "2 ASPIRE BOULEVARD FRASER RISE 3336"},
+    "Wednesday B": {"street_address": "17 KEYNES CIRCUIT FRASER RISE 3336"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+ICON_MAP = {
+    "Food and Green Waste": "mdi:leaf",
+    "Hard Waste": "mdi:sofa",
+    "Recycling": "mdi:recycle",
+}
+
+
class Source:
    """Source for Melton City Council kerbside collections."""

    def __init__(self, street_address):
        self._street_address = street_address

    def fetch(self):
        """Resolve the address to a geolocation id and scrape the
        waste-services widget for upcoming pickups."""
        session = requests.Session()

        # Visiting the landing page first establishes session cookies.
        landing = session.get("https://www.melton.vic.gov.au/My-Area")
        landing.raise_for_status()

        search = session.get(
            "https://www.melton.vic.gov.au/api/v1/myarea/search",
            params={"keywords": self._street_address},
        )
        search.raise_for_status()
        addressSearchApiResults = search.json()
        hits = addressSearchApiResults["Items"]
        if hits is None or len(hits) < 1:
            _LOGGER.error(
                f"Address search for '{self._street_address}' returned no results. Check your address on https://www.melton.vic.gov.au/My-Area"
            )
            return []

        top_hit = hits[0]
        _LOGGER.debug("Address search top hit: %s", top_hit)

        geolocationid = top_hit["Id"]
        _LOGGER.debug("Geolocationid: %s", geolocationid)

        services = session.get(
            "https://www.melton.vic.gov.au/ocapi/Public/myarea/wasteservices?ocsvclang=en-AU",
            params={"geolocationid": geolocationid},
        )
        services.raise_for_status()

        wasteApiResult = services.json()
        _LOGGER.debug("Waste API result: %s", wasteApiResult)

        widget = BeautifulSoup(wasteApiResult["responseContent"], "html.parser")

        entries = []
        for article in widget.find_all("article"):
            bin_name = article.h3.string
            next_service = article.find(class_="next-service").string.strip()
            # Expected form "<weekday> d/m/yyyy"; anything else is skipped.
            if not re.match(r"[^\s]* \d{1,2}\/\d{1,2}\/\d{4}", next_service):
                continue
            service_date = datetime.strptime(
                next_service.split(sep=" ")[1], "%d/%m/%Y"
            ).date()
            entries.append(
                Collection(
                    date=service_date,
                    t=bin_name,
                    icon=ICON_MAP.get(bin_name, "mdi:trash-can"),
                )
            )

        return entries

+ 97 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/miljoteknik_se.py

@@ -0,0 +1,97 @@
+import json
+import re
+import requests
+from bs4 import BeautifulSoup as bs
+from datetime import datetime
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+# Ronneby Miljöteknik, Blekinge Sweden
+#
+# The public URL is http://www.fyrfackronneby.se/hamtningskalender/
+# However, this uses an iframe from http://35.228.122.136 which is the
+# actual service providing the bin data.
+#
+# One first has to do a search since they put an ID in the list of
+# search results which is required when sending the request to get the
+# bin data. The data then comes injected into a script tag as it's
+# normally used to build a browseable calendar for easy viewing.
+#
+# Bins in this municipality have four types of waste each, and each
+# house has 2 bins, example raw data for the two bins:
+#
+# { title: 'Kärl 1 –  373 liter: Mat, Brännbart, färgat glas, tidningar.', start: '2023-09-12' },
+# { title: 'Kärl 2 –  373 liter: Plast, pappersförpackningar, ofärgat glas, metall.', start: '2023-09-05' },
+#
+# The API will return about a years worth of bin collection dates
+# and only the dates will change, title remains the same for the two
+# bins. First one being Food, Burnables, Colored glass and Newspapers,
+# and the second is Plastics, Cardboard, Non-colored glass and Metal.
+#
+# Note: This API does not apply for apartment buildings, municipal/state
+# services or similar types of buildings as those do not have the same
+# types of bins as regular houses. There is currently no known API for
+# those bins, only for the so called "Fyrfack" bins (meaning four slots).
+#
+
+TITLE = "Ronneby Miljöteknik Sophämntning"
+DESCRIPTION = "Source for Ronneby Miljöteknik waste collection."
+URL = (
+    "http://www.fyrfackronneby.se/hamtningskalender/"
+)
+TEST_CASES = {
+    "Home": {"street_address": "Hjortsbergavägen 16, Johannishus"}
+}
+
+
class Source:
    """Source for Ronneby Miljöteknik ("fyrfack" household bins only)."""

    def __init__(self, street_address):
        # Expected form "Street 1, City"; split into the two lookup parts.
        parts = street_address.split(',')
        self._street_address = parts[0]
        self._city = parts[1].lstrip()

    def fetch(self):
        """Look up the pickup id for the address, then parse the embedded
        calendar events into Collection entries."""
        headers = {
            'Accept-Encoding': 'identity',
            'Accept': '*/*',
            'Accept-Language': 'sv-SE,sv;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
        }

        # Step 1: search suggestions carry the internal pickup id.
        search = requests.post(
            "http://35.228.122.136/search_suggestions.php",
            data={"search_address": self._street_address},
            headers=headers
        )

        markup = bs(search.text, 'html.parser')
        pickup_id = False
        for candidate in markup.find_all('span', attrs={'class': 'address'}):
            if candidate.string != self._street_address:
                continue
            # The city sits in a sibling <span>; require it to match too.
            for sibling in candidate.next_siblings:
                if sibling.name == 'span' and sibling.string == self._city:
                    pickup_id = candidate.parent['id']
                    break
            if pickup_id:
                break
        if not pickup_id:
            return []

        # Step 2: fetch the calendar data for the chosen address.
        calendar = requests.post(
            "http://35.228.122.136/get_data.php",
            data={
                "chosen_address": "{} {}".format(self._street_address, self._city),
                "chosen_address_pickupid": pickup_id
            },
            headers=headers
        )

        entries = []
        # Events arrive as JS object literals, e.g.
        # { title: '...', start: '2023-09-12' }; patch them into JSON.
        for raw_event in re.findall(r'{.title:[^}]+}', calendar.text):
            event = json.loads(
                re.sub(r'(title|start):', r'"\1":', raw_event.replace("'", '"'))
            )
            entries.append(
                Collection(
                    date=datetime.fromisoformat(event['start']).date(),
                    t=event['title'].split(':')[1].lstrip(),
                    # Both bins hold mixed recyclables, hence a single icon.
                    icon="mdi:recycle",
                )
            )
        return entries

+ 110 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py

@@ -0,0 +1,110 @@
+import requests
+import urllib.parse
+import json
+import datetime
+import re
+
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from pprint import pprint
+
+TITLE = "Min Renovasjon"
+DESCRIPTION = "Source for Norkart Komtek MinRenovasjon (Norway)."
+URL = "https://www.norkart.no/komtek/renovasjon/"
+
+# **street_code:** \
+# **county_id:** \
+# Can be found with this REST-API call.
+# ```
+# https://ws.geonorge.no/adresser/v1/#/default/get_sok
+# https://ws.geonorge.no/adresser/v1/sok?sok=Min%20Gate%2012
+# ```
+# "street_code" equals to "adressekode" and "county_id" equals to "kommunenummer".
+
+TEST_CASES = {
+    "Sandvika Rådhus": {
+        "street_name": "Rådhustorget",
+        "house_number": 2,
+        "street_code": 2469,
+        "county_id": 3024
+    }
+}
+
+BASE_URL = "https://komteksky.norkart.no/komtek.renovasjonwebapi/api/"
+APP_KEY  = "AE13DEEC-804F-4615-A74E-B4FAC11F0A30"
+
class Source:
    """Source for Norkart Komtek MinRenovasjon (Norway).

    street_code/county_id correspond to "adressekode"/"kommunenummer"
    from the geonorge address API (see module comment above).
    """

    def __init__(self, street_name, house_number, street_code, county_id):
        self._street_name = street_name
        self._house_number = house_number
        self._street_code = street_code
        self._county_id = county_id
        # Maps the API icon file name (without path/extension) to an
        # MDI icon shown in Home Assistant.
        self._icon_map  = {
            "":                        "mdi:trash-can",
            "brush":                   "mdi:trash-can",
            "elektriskogelektronisk":  "mdi:chip",
            "farligavfall":            "mdi:trash-can",
            "glassogmetallemballasje": "mdi:trash-can",
            "hageavfall":              "mdi:leaf",
            "klaerogsko":              "mdi:hanger",
            "matavfall":               "mdi:trash-can",
            "matrestavfall":           "mdi:trash-can",
            "matrestavfallplast":      "mdi:trash-can",
            "metall":                  "mdi:trash-can",
            "papir":                   "mdi:newspaper-variant-multiple",
            "pappogkartong":           "mdi:archive",
            "plastemballasje":         "mdi:trash-can",
            "restavfall":              "mdi:trash-can",
            "drikkekartong":           "mdi:newspaper-variant-multiple",
            "papppapirdrikkekartong":  "mdi:newspaper-variant-multiple",
            "trevirke":                "mdi:trash-can"
        }

    def fetch(self):
        """Fetch the fraction catalog, then the pickup calendar, and join
        them into Collection entries."""
        headers = {
            'Kommunenr': str(self._county_id),
            'RenovasjonAppKey': APP_KEY,
            'user-agent': 'Home-Assitant-waste-col-sched/0.1'
        }

        r = requests.get(BASE_URL + 'fraksjoner', params={}, headers=headers)

        # Renamed from "type" to avoid shadowing the builtin.
        fractions = {}
        for f in json.loads(r.content):
            # Derive the icon key from the icon URL's file name.
            icon_name = re.sub(r"^.*?/(\w+)\.\w{3,4}$", "\\1", f['Ikon'])
            fractions[f['Id']] = {
                'name': f['Navn'],
                'image': f['Ikon'],
                # Unknown fractions fall back to a generic bin icon.
                'icon': self._icon_map.get(icon_name, "mdi:trash-can"),
            }

        args = {
            'gatenavn': self._street_name,
            'husnr': self._house_number,
            'gatekode': self._street_code,
        }

        r = requests.get(BASE_URL + 'tommekalender', params=args, headers=headers)

        entries = []
        for f in json.loads(r.content):
            fraction = fractions[f['FraksjonId']]
            for d in f['Tommedatoer']:
                entries.append(
                    Collection(
                        date=datetime.datetime.strptime(
                            d, "%Y-%m-%dT%H:%M:%S"
                        ).date(),
                        t=fraction['name'],
                        icon=fraction['icon'],
                        picture=fraction['image']
                    )
                )

        return entries

+ 81 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/mrsc_vic_gov_au.py

@@ -0,0 +1,81 @@
+import logging
+from datetime import datetime
+import re
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Macedon Ranges Shire Council"
+DESCRIPTION = "Source for Macedon Ranges Shire Council rubbish collection."
+URL = "https://www.mrsc.vic.gov.au/Live-Work/Bins-Rubbish-Recycling/Bins-and-collection-days/Bin-collection-days"
+TEST_CASES = {
+    "Macedon IGA": {"street_address": "20 Victoria Street, Macedon"},
+    "ALDI Gisborne": {"street_address": "45 Aitken Street, Gisborne"},
+}
+
+_LOGGER = logging.getLogger(__name__)
+
+ICON_MAP = {
+    "FOGO bin": "mdi:leaf",
+    "Recycling bin": "mdi:recycle",
+    "Glass-only bin": "mdi:glass-fragile",
+}
+
+
class Source:
    """Source for Macedon Ranges Shire Council rubbish collection."""

    def __init__(self, street_address):
        self._street_address = street_address

    def fetch(self):
        """Resolve the address to a geolocation id, then scrape the
        waste-services widget for upcoming pickups."""
        session = requests.Session()

        # Prime session cookies before calling the APIs.
        response = session.get(
            "https://www.mrsc.vic.gov.au/Live-Work/Bins-Rubbish-Recycling/Bins-and-collection-days/Bin-collection-days"
        )
        response.raise_for_status()

        response = session.get(
            "https://www.mrsc.vic.gov.au/api/v1/myarea/search",
            params={"keywords": self._street_address},
        )
        response.raise_for_status()
        addressSearchApiResults = response.json()
        if (
            addressSearchApiResults["Items"] is None
            or len(addressSearchApiResults["Items"]) < 1
        ):
            _LOGGER.error(
                f"Address search for '{self._street_address}' returned no results. Check your address on https://www.mrsc.vic.gov.au/Live-Work/Bins-Rubbish-Recycling/Bins-and-collection-days/Bin-collection-days"
            )
            return []

        addressSearchTopHit = addressSearchApiResults["Items"][0]
        _LOGGER.debug("Address search top hit: %s", addressSearchTopHit)

        geolocationid = addressSearchTopHit["Id"]
        _LOGGER.debug("Geolocationid: %s", geolocationid)

        response = session.get(
            "https://www.mrsc.vic.gov.au/ocapi/Public/myarea/wasteservices?ocsvclang=en-AU",
            params={"geolocationid": geolocationid},
        )
        response.raise_for_status()

        wasteApiResult = response.json()
        _LOGGER.debug("Waste API result: %s", wasteApiResult)

        soup = BeautifulSoup(wasteApiResult["responseContent"], "html.parser")

        entries = []
        for article in soup.find_all("article"):
            waste_type = article.h3.string
            icon = ICON_MAP.get(waste_type, "mdi:trash-can")
            next_pickup = article.find(class_="next-service").string.strip()
            # Raw string: the previous "[^\s]..." literal relied on an
            # invalid escape sequence (DeprecationWarning, an error in
            # newer Pythons). Matches "<weekday> d/m/yyyy".
            if re.match(r"[^\s]* \d{1,2}/\d{1,2}/\d{4}", next_pickup):
                next_pickup_date = datetime.strptime(
                    next_pickup.split(sep=" ")[1], "%d/%m/%Y"
                ).date()
                entries.append(
                    Collection(date=next_pickup_date, t=waste_type, icon=icon)
                )

        return entries

+ 146 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py

@@ -0,0 +1,146 @@
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Müllmax"
+DESCRIPTION = "Source for Müllmax waste collection."
+URL = "https://www.muellmax.de"
+TEST_CASES = {
+    "Rhein-Sieg-Kreis, Alfter": {
+        "service": "Rsa",
+        "mm_frm_ort_sel": "Alfter",
+        "mm_frm_str_sel": "Ahrweg (105-Ende/94-Ende)",
+    },
+    "Münster, Achatiusweg": {"service": "Awm", "mm_frm_str_sel": "Achatiusweg"},
+}
+
+
# Parser for HTML checkbox
class InputCheckboxParser(HTMLParser):
    """Collects name/value pairs of every <input> whose name attribute
    starts with a given prefix."""

    def __init__(self, startswith):
        super().__init__()
        self._startswith = startswith
        self._value = {}

    @property
    def value(self):
        """Dict mapping matching input names to their value attributes."""
        return self._value

    def handle_starttag(self, tag, attrs):
        if tag != "input":
            return
        attributes = dict(attrs)
        name = attributes.get("name", "")
        if name.startswith(self._startswith):
            self._value[name] = attributes.get("value")
+
+
# Parser for HTML input (hidden) text
class InputTextParser(HTMLParser):
    """Extracts the value of <input> tags matching ALL of the given
    attribute identifiers (e.g. name="mm_ses"); the last match wins."""

    def __init__(self, **identifiers):
        super().__init__()
        self._identifiers = identifiers
        self._value = None

    @property
    def value(self):
        """Value attribute of the matched input, or None if none seen."""
        return self._value

    def handle_starttag(self, tag, attrs):
        if tag != "input":
            return
        attributes = dict(attrs)
        # Every identifier must be present and equal, otherwise skip.
        for key, expected in self._identifiers.items():
            if key not in attributes or attributes[key] != expected:
                return
        self._value = attributes.get("value")
+
+
class Source:
    """Source for Müllmax-based waste calendars.

    Walks the multi-step web form (service start page, optional city,
    street and house-number selection) and finally downloads the
    iCalendar export, which is converted into Collection entries.
    """

    def __init__(
        self, service, mm_frm_ort_sel=None, mm_frm_str_sel=None, mm_frm_hnr_sel=None
    ):
        self._service = service
        self._mm_frm_ort_sel = mm_frm_ort_sel
        self._mm_frm_str_sel = mm_frm_str_sel
        self._mm_frm_hnr_sel = mm_frm_hnr_sel
        self._ics = ICS()

    def fetch(self):
        # The form carries a session id in the hidden field "mm_ses";
        # it must be re-read from every response and posted back.
        mm_ses = InputTextParser(name="mm_ses")

        url = f"https://www.muellmax.de/abfallkalender/{self._service.lower()}/res/{self._service}Start.php"
        r = requests.get(url)
        mm_ses.feed(r.text)

        # Select "Abfuhrtermine" via its image submit button. Image
        # buttons send click coordinates as <name>.x AND <name>.y
        # (the original dict literal repeated ".x", losing one key).
        args = {"mm_ses": mm_ses.value, "mm_aus_ort.x": 0, "mm_aus_ort.y": 0}
        r = requests.post(url, data=args)
        mm_ses.feed(r.text)

        if self._mm_frm_ort_sel is not None:
            # select city
            args = {
                "mm_ses": mm_ses.value,
                "xxx": 1,
                "mm_frm_ort_sel": self._mm_frm_ort_sel,
                "mm_aus_ort_submit": "weiter",
            }
            r = requests.post(url, data=args)
            mm_ses.feed(r.text)

        if self._mm_frm_str_sel is not None:
            # show street selection page (an empty search lists all streets)
            args = {
                "mm_ses": mm_ses.value,
                "xxx": 1,
                "mm_frm_str_name": "",
                "mm_aus_str_txt_submit": "suchen",
            }
            r = requests.post(url, data=args)
            mm_ses.feed(r.text)

            # select street
            args = {
                "mm_ses": mm_ses.value,
                "xxx": 1,
                "mm_frm_str_sel": self._mm_frm_str_sel,
                "mm_aus_str_sel_submit": "weiter",
            }
            r = requests.post(url, data=args)
            mm_ses.feed(r.text)

        if self._mm_frm_hnr_sel is not None:
            # select house number
            args = {
                "mm_ses": mm_ses.value,
                "xxx": 1,
                "mm_frm_hnr_sel": self._mm_frm_hnr_sel,
                "mm_aus_hnr_sel_submit": "weiter",
            }
            r = requests.post(url, data=args)
            mm_ses.feed(r.text)

        # switch to the iCalendar download page
        args = {"mm_ses": mm_ses.value, "xxx": 1, "mm_ica_auswahl": "iCalendar-Datei"}
        r = requests.post(url, data=args)
        mm_ses.feed(r.text)

        # tick every offered waste-type checkbox (mm_frm_fra*)
        mm_frm_fra = InputCheckboxParser(startswith="mm_frm_fra")
        mm_frm_fra.feed(r.text)

        # request the ics file with all fractions selected
        args = {"mm_ses": mm_ses.value, "xxx": 1, "mm_frm_type": "termine"}
        args.update(mm_frm_fra.value)
        args.update({"mm_ica_gen": "iCalendar-Datei laden"})
        r = requests.post(url, data=args)
        mm_ses.feed(r.text)

        # parse ics file into (date, type) tuples
        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries

+ 76 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/muenchenstein_ch.py

@@ -0,0 +1,76 @@
+import json
+from datetime import datetime, timedelta
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Abfallsammlung Münchenstein"
+DESCRIPTION = "Source for Muenchenstein waste collection."
+URL = "https://www.muenchenstein.ch/abfallsammlung"
+TEST_CASES = {
+    "Abfuhrkreis Ost": {"waste_district": "Abfuhrkreis Ost"},
+    "Abfuhrkreis West": {"waste_district": "492"},
+}
+
+
+IconMap = {
+    "kehricht": "mdi:trash-can",
+    "hackseldienst": "mdi:leaf",
+    "papierabfuhr": "mdi:newspaper-variant-multiple-outline",
+    "kartonabfuhr": "mdi:package-variant",
+    "altmetalle": "mdi:nail",
+}
+
+
class Source:
    """Source for Münchenstein waste collection.

    waste_district is either the district name ("Abfuhrkreis Ost" /
    "Abfuhrkreis West") or its numeric id ("491" / "492").
    """

    def __init__(self, waste_district):
        self._waste_district = waste_district

    def fetch(self):
        response = requests.get(URL)

        html = BeautifulSoup(response.text, "html.parser")

        table = html.find("table", attrs={"id": "icmsTable-abfallsammlung"})
        data = json.loads(table.attrs["data-entities"])

        entries = []
        for item in data["data"]:
            if (
                self._waste_district in item["abfallkreisIds"]
                or self._waste_district in item["abfallkreisNameList"]
            ):
                next_pickup = item["_anlassDate-sort"].split()[0]
                next_pickup_date = datetime.fromisoformat(next_pickup).date()

                # Event names contain HTML markup; reduce to plain text.
                waste_type = BeautifulSoup(item["name"], "html.parser").text
                waste_type_sorted = BeautifulSoup(item["name-sort"], "html.parser").text

                entries.append(
                    Collection(
                        date=next_pickup_date,
                        t=waste_type,
                        icon=IconMap.get(waste_type_sorted, "mdi:trash-can"),
                    )
                )

        # Collection of "Kehricht und Kleinsperrgut brennbar" is not listed
        # with dates as events on the website. Instead it states the day of
        # the week for each waste district: Tuesday for east and Friday for
        # west. So we set the next 4 occurrences programmatically.
        # Compare as strings so the numeric id works whether it is passed
        # as "491" or 491 (the old `== 491` never matched string input).
        is_east = str(self._waste_district) in ("Abfuhrkreis Ost", "491")
        weekday_collection = 2 if is_east else 4
        weekday_today = datetime.now().isoweekday()
        for x in range(4):
            days_to_pickup = (x * 7) + ((weekday_collection - weekday_today) % 7)
            next_pickup_date = (datetime.now() + timedelta(days=days_to_pickup)).date()

            entries.append(
                Collection(
                    date=next_pickup_date,
                    t="Kehricht und Kleinsperrgut brennbar",
                    # Use the fixed "kehricht" icon key here; the previous
                    # code reused the loop variable from above, which is
                    # unbound when no events matched (NameError) and names
                    # the wrong waste type otherwise.
                    icon=IconMap.get("kehricht", "mdi:trash-can"),
                )
            )

        return entries

+ 117 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/nawma_sa_gov_au.py

@@ -0,0 +1,117 @@
+import datetime
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection
+
TITLE = "North Adelaide Waste Management Authority"
DESCRIPTION = (
    "Source for nawma.sa.gov.au (Salisbury, Playford, and Gawler South Australia)."
)
URL = "http://www.nawma.sa.gov.au"
# One test case per collection weekday of the week; street_number is optional
# and some addresses resolve on street name + suburb alone.
TEST_CASES = {
    "128 Bridge Road": {
        "street_number": "128",
        "street_name": "Bridge Road",
        "suburb": "Pooraka",
    },  # Monday
    "226 Bridge Road": {
        "street_number": "226",
        "street_name": "Bridge Road",
        "suburb": "Pooraka",
    },  # Monday reverse
    "Whites Road": {"street_name": "Whites Road", "suburb": "Paralowie"},  # Tuesday
    "Hazel Avenue": {
        "street_name": "Hazel Avenue",
        "suburb": "Angle Vale",
    },  # Wednesday
    "155 Murray St": {
        "street_name": "Murray Street (sec between Ayers and the railway line",
        "suburb": "Gawler",
    },  # Thursday
    "Edward Crescent": {
        "street_name": "Edward Crescent",
        "suburb": "Evanston Park",
    },  # Friday
}

# Browser-like user-agent sent with the POST.
# NOTE(review): presumably the endpoint rejects the default python-requests
# agent — confirm before simplifying.
HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
+
+
class CollectionResultsParser(HTMLParser):
    """Parser for the collection results <div> element returned by the API.

    Implements a small state machine over the HTML event stream: an entry
    begins at a ``<div class="coll-content">``, its waste-stream name comes
    from an ``<h6>`` heading, and the next collection date is the text of
    the sixth ``<td>`` cell within the entry.
    """

    def __init__(self):
        super().__init__()
        self._entries = []  # completed Collection objects, in document order

        self._current_type = None  # waste-stream name captured from the heading

        # State machine
        self._in_entry = False  # inside a coll-content <div>
        # NOTE(review): _read_type starts True, so the FIRST text node fed to
        # the parser is captured as a type even before any entry div is seen —
        # this appears to rely on the shape of the API payload; confirm.
        self._read_type = True
        self._cell_count = 0  # <td> cells seen; the date lives in cell 6

    @property
    def entries(self):
        """List of Collection objects parsed so far."""
        return self._entries

    def handle_starttag(self, tag, attrs):
        # A new entry starts at <div class="coll-content">; reset the <td>
        # counter so the date lookup counts cells within this entry only.
        attrs_dict = dict(attrs)
        if tag == "div" and attrs_dict.get("class") == "coll-content":
            self._in_entry = True
            self._cell_count = 0
        elif tag == "h6" and self._in_entry:
            # The next text node is the waste-stream name.
            self._read_type = True
        elif tag == "td":
            self._cell_count = self._cell_count + 1

    def handle_endtag(self, tag):
        # Any closing </div> terminates the current entry.
        if tag == "div":
            self._in_entry = False

    def handle_data(self, data):
        """Capture the pending type name, or the date once in cell 6."""
        if self._read_type:
            self._current_type = data
            self._read_type = False

        elif self._in_entry and self._cell_count == 6:
            # Date text is formatted like "14 March 2022".
            date = datetime.datetime.strptime(data.strip(), "%d %B %Y").date()

            # Icon by bin colour mentioned in the type name; default general waste.
            icon = "mdi:trash-can"
            if "yellow" in self._current_type:
                icon = "mdi:recycle"
            elif "green" in self._current_type:
                icon = "mdi:leaf"

            self._entries.append(Collection(date, self._current_type, icon))

            # We're finished with this entry
            self._in_entry = False
+
+
class Source:
    """Source for NAWMA (Salisbury, Playford, Gawler SA) kerbside collections.

    Queries the council's WordPress admin-ajax endpoint with the address as a
    form-encoded POST and parses the returned HTML fragment.
    """

    def __init__(self, street_name, suburb, street_number="", pid="2444"):
        # Consistency fix: street_name was the only attribute stored without
        # the leading underscore used by every other field of this class.
        self._street_number = street_number  # optional; some streets resolve without it
        self._street_name = street_name
        self._suburb = suburb
        self._pid = pid  # opaque site parameter copied from the web form; meaning unknown

    def fetch(self):
        """Return the list of upcoming Collection objects for the address."""
        params = {
            "action": "collection_day",
            "street_no": self._street_number,
            "street": self._street_name,
            "area": self._suburb,
            "pid": self._pid,
        }

        r = requests.post(
            "http://www.nawma.sa.gov.au/wp-admin/admin-ajax.php",
            headers=HEADERS,
            data=params,  # The parameters are sent as the body of the post
        )

        p = CollectionResultsParser()
        p.feed(r.text)
        return p.entries

+ 63 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/newcastle_gov_uk.py

@@ -0,0 +1,63 @@
+import logging
+from datetime import datetime
+import re
+import requests
+# These lines are needed to suppress the InsecureRequestWarning resulting from the POST verify=False option
+# With verify=True the POST fails due to a SSLCertVerificationError.
+import urllib3
+from waste_collection_schedule import Collection
+
+urllib3.disable_warnings()
+# The following links may provide a better way of dealing with this, as using verify=False is not ideal:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+
+_LOGGER = logging.getLogger(__name__)
+
TITLE = "newcastle.gov.uk"
DESCRIPTION = (
    """Source for waste collection services for Newcastle City Council"""
)
URL = "https://community.newcastle.gov.uk/my-neighbourhood/ajax/getBinsNew.php"
# Matches one bin block in the HTML response. Capture groups:
#   0: bin colour (Green|Blue|Brown)
#   1: waste type (Domestic|Recycling|Garden)
#   2: optional " Waste" suffix
#   3: next collection date, formatted DD-Mon-YYYY
REGEX = "<strong>(Green|Blue|Brown) [bB]in \\((Domestic|Recycling|Garden)( Waste)?\\) details: <\\/strong><br\\/>" \
        "collection day : [a-zA-Z]*day<br\\/>" \
        "Next collection : ([0-9]{2}-[A-Za-z]+-[0-9]{4})"
# Icon per waste stream, keyed by the upper-cased "waste type" regex group.
ICONS = {
    "DOMESTIC": "mdi:trash-can",
    "RECYCLING": "mdi:recycle",
    "GARDEN": "mdi:leaf",
}

# UPRNs may be configured as zero-padded strings or plain integers.
TEST_CASES = {
    "Test_001": {"uprn": "004510053797"},
    "Test_002": {"uprn": 4510053797}
}
+
+
class Source:
    """Source for Newcastle City Council bin collections, keyed by UPRN."""

    def __init__(self, uprn=None):
        # Bug fix: the original converted first (str(None) -> "None", then
        # zero-padded), so "if not self._uprn" could never fire; it also
        # applied .zfill(12) twice. Validate the raw argument instead.
        if uprn is None:
            _LOGGER.error(
                "uprn must be provided in config"
            )
        # UPRNs are 12 digits; left-pad numeric or short string inputs.
        self._uprn = str(uprn).zfill(12)
        # (The previously created requests.Session was never used — fetch()
        # issues a plain requests.get — so it has been removed.)

    def fetch(self):
        """Return one Collection per bin found in the AJAX HTML response."""
        entries = []
        res = requests.get(f"{URL}?uprn={self._uprn}")
        collections = re.findall(REGEX, res.text)

        # Each match: (colour, waste type, optional " Waste" suffix, date).
        for collection in collections:
            collection_type = collection[1]
            collection_date = collection[3]
            entries.append(
                Collection(
                    date=datetime.strptime(collection_date, '%d-%b-%Y').date(),
                    t=collection_type,
                    icon=ICONS.get(collection_type.upper()),
                )
            )

        return entries

+ 80 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py

@@ -0,0 +1,80 @@
+import json
+import datetime
+import time
+
+import requests
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
TITLE = "nottinghamcity.gov.uk"
DESCRIPTION = "Source for nottinghamcity.gov.uk services for the city of Nottingham, UK."
URL = "https://nottinghamcity.gov.uk"
TEST_CASES = {
    "Douglas Rd, Nottingham NG7 1NW": {"uprn": "100031540175"},
    "Harlaxton Drive, Nottingham, NG7 1JE": {"uprn": "100031553830"},
}

# Maps the council API's per-bin weekday fields to display name and icon.
BINS = {
    "DryRecyclingDay": {
        "icon": "mdi:recycle",
        "name": "Recycling"
    },
    "DomesticDay": {
        "icon": "mdi:trash-can",
        "name": "General"
    },
    "GardenDay": {
        "icon": "mdi:leaf",
        "name": "Garden"
    },
    "FoodWaste": {
        "icon": "mdi:food-apple",
        "name": "Food"
    }
}
+
class Source:
    """Source for Nottingham City Council bin collections, keyed by UPRN."""

    def __init__(self, uprn):
        self._uprn = uprn

    def fetch(self):
        """Fetch the live bin data and build one Collection per applicable bin."""
        response = requests.get(
            f"https://geoserver.nottinghamcity.gov.uk/myproperty/handler/proxy.ashx?http://geoserver.nottinghamcity.gov.uk/wcf/BinCollection.svc/livebin/{self._uprn}"
        )
        details = json.loads(response.text)["CollectionDetails"]

        # Anchor all offsets to the Monday of the current ISO week.
        today = datetime.date.today()
        week_start = today - datetime.timedelta(days=today.isoweekday() - 1)
        week_number = today.isocalendar().week

        # RecyclingWeek being B means recycling is on even numbered weeks.
        on_a_week = details["RecyclingWeek"] == "A"
        on_b_week = details["RecyclingWeek"] == "B"

        entries = []
        for bin_key, props in BINS.items():
            weekday_name = details[bin_key]
            if weekday_name == "Not Applicable":
                continue

            # Weekday index with Monday == 0, parsed from the English day name.
            weekday = time.strptime(weekday_name, "%A").tm_wday

            # Fortnightly streams alternate on ISO week parity; food waste
            # (and any other stream) defaults to the current week.
            if bin_key in ("DryRecyclingDay", "GardenDay"):
                weeks_ahead = (week_number + on_a_week) % 2
            elif bin_key == "DomesticDay":
                weeks_ahead = (week_number + on_b_week) % 2
            else:
                weeks_ahead = 0

            entries.append(
                Collection(
                    date=week_start + datetime.timedelta(days=weekday, weeks=weeks_ahead),
                    t=props["name"],
                    icon=props["icon"],
                )
            )

        return entries

+ 82 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/nsomerset_gov_uk.py

@@ -0,0 +1,82 @@
+import logging
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
TITLE = "North Somerset.gov.uk"
DESCRIPTION = "Source for n-somerset.gov.uk services for North Somerset, UK."
URL = "n-somerset.gov.uk"

TEST_CASES = {
    "Walliscote Grove Road, Weston super Mare": {
        "uprn": "24009468",
        "postcode": "BS23 1UJ",
    },
    "Walliscote Road, Weston super Mare": {"uprn": "24136727", "postcode": "BS23 1EF"},
}

# Icon per service name (looked up with the upper-cased "Service" cell);
# unknown services get no icon (dict.get returns None).
ICON_MAP = {
    "RUBBISH": "mdi:trash-can",
    "RECYCLING": "mdi:recycle",
    "FOOD": "mdi:food",
}

_LOGGER = logging.getLogger(__name__)
+
+
class Source:
    """Source for North Somerset Council collections, keyed by UPRN + postcode.

    Posts the address to the council's schedule form and scrapes the first
    HTML table of the response (header row = field names, data rows = one
    collection service each).
    """

    def __init__(self, uprn, postcode):
        self._uprn = uprn
        self._postcode = postcode

    @staticmethod
    def _resolve_year(parsed, today):
        """Resolve a year-less parsed datetime to a concrete future date.

        The site publishes dates like "Friday 06 January" without a year, so
        strptime leaves the placeholder year 1900. Assume the current year,
        and roll over to next year when that would place the "next
        collection" in the past (e.g. a January date fetched in December —
        the original code silently produced a date a year in the past).
        """
        resolved = parsed.replace(year=today.year).date()
        if resolved < today:
            resolved = resolved.replace(year=today.year + 1)
        return resolved

    def fetch(self):
        """Return upcoming Collection objects scraped from the schedule form."""
        request = requests.post(
            "https://forms.n-somerset.gov.uk/Waste/CollectionSchedule",
            data={
                "PreviousHouse": "",
                "PreviousPostcode": "-",
                "Postcode": self._postcode,
                "SelectedUprn": self._uprn,
            },
        )

        soup_result = BeautifulSoup(request.text, "html.parser").table

        entries = []

        # Robustness: an unknown address yields a page without a table;
        # the original would raise AttributeError on .find_all below.
        if soup_result is None:
            return entries

        # Header cells give the column names used to key each data row.
        fields = []
        for tr in soup_result.find_all("tr"):
            for th in tr.find_all("th"):
                fields.append(th.text)

        table_data = []
        for tr in soup_result.find_all("tr"):
            datum = {}
            for i, td in enumerate(tr.find_all("td")):
                datum[fields[i]] = td.text
            if datum:
                table_data.append(datum)

        today = datetime.now().date()
        for collection in table_data:
            try:
                collection_date = self._resolve_year(
                    datetime.strptime(
                        collection["Next collection date"], "%A %d %B"
                    ),
                    today,
                )
            except ValueError:
                # Unparseable date cell (e.g. a "not applicable" note) — skip row.
                continue
            entries.append(
                Collection(
                    date=collection_date,
                    t=collection["Service"],
                    icon=ICON_MAP.get(collection["Service"].upper()),
                )
            )

        return entries

+ 0 - 0
custom_components/waste_collection_schedule/waste_collection_schedule/source/oslokommune_no.py


Some files were not shown because too many files changed in this diff