diff --git a/sijapi/__init__.py b/sijapi/__init__.py
index 9cd99bc..cfe5388 100644
--- a/sijapi/__init__.py
+++ b/sijapi/__init__.py
@@ -12,7 +12,7 @@ from typing import List, Optional
 import traceback
 import logging
 from .logs import Logger
-from .classes import AutoResponder, IMAPConfig, SMTPConfig, EmailAccount, EmailContact, IncomingEmail, TimezoneTracker, Database, PyGeolocator
+from .classes import AutoResponder, IMAPConfig, SMTPConfig, EmailAccount, EmailContact, IncomingEmail, TimezoneTracker, Database, Geocoder
 
 # from sijapi.config.config import load_config
 # cfg = load_config()
@@ -68,7 +68,7 @@ GEONAMES_TXT = DATA_DIR / "geonames.txt"
 LOCATIONS_CSV = DATA_DIR / "US.csv"
 TZ = tz.gettz(os.getenv("TZ", "America/Los_Angeles"))
 TZ_CACHE = DATA_DIR / "tzcache.json"
-DynamicTZ = TimezoneTracker(TZ_CACHE)
+GEO = Geocoder(NAMED_LOCATIONS, TZ_CACHE)
 
 ### Obsidian & notes
 ALLOWED_FILENAME_CHARS = r'[^\w \.-]'
@@ -90,7 +90,6 @@ YEAR_FMT = os.getenv("YEAR_FMT")
 MONTH_FMT = os.getenv("MONTH_FMT")
 DAY_FMT = os.getenv("DAY_FMT")
 DAY_SHORT_FMT = os.getenv("DAY_SHORT_FMT")
-GEOLOCATOR = PyGeolocator
 
 ### Large language model
 LLM_URL = os.getenv("LLM_URL", "http://localhost:11434")
diff --git a/sijapi/classes.py b/sijapi/classes.py
index f3444d3..4b94561 100644
--- a/sijapi/classes.py
+++ b/sijapi/classes.py
@@ -1,54 +1,88 @@
-from pydantic import BaseModel
-from typing import List, Optional, Any, Tuple, Dict, Union, Tuple
-from datetime import datetime, timedelta
-import asyncio
-import asyncpg
-import json
 from pydantic import BaseModel, Field
-from typing import Optional
-import asyncpg
-import os
-from typing import Optional, Tuple, Union
-from datetime import datetime, timedelta
+from typing import List, Optional, Any, Tuple, Dict, Union
+from datetime import datetime, timedelta, timezone
+import asyncio
 import json
 from timezonefinder import TimezoneFinder
 from pathlib import Path
-
-from pydantic import BaseModel, Field
-from typing import Optional
-
-from pydantic import BaseModel, Field
-from typing import Optional
-import asyncpg
-
-from pydantic import BaseModel, Field
-from typing import Optional
 import asyncpg
+import aiohttp
+import aiofiles
+import yaml
+from math import radians, sin, cos, atan2, sqrt
 from contextlib import asynccontextmanager
-
+from concurrent.futures import ThreadPoolExecutor
 import reverse_geocoder as rg
 from timezonefinder import TimezoneFinder
 from srtm import get_data
 
-class PyGeolocator:
-    def __init__(self):
+class Location(BaseModel):
+    latitude: float
+    longitude: float
+    datetime: datetime
+    elevation: Optional[float] = None
+    altitude: Optional[float] = None
+    zip: Optional[str] = None
+    street: Optional[str] = None
+    city: Optional[str] = None
+    state: Optional[str] = None
+    country: Optional[str] = None
+    context: Optional[Dict[str, Any]] = None 
+    class_: Optional[str] = None
+    type: Optional[str] = None
+    name: Optional[str] = None
+    display_name: Optional[str] = None
+    boundingbox: Optional[List[str]] = None
+    amenity: Optional[str] = None
+    house_number: Optional[str] = None
+    road: Optional[str] = None
+    quarter: Optional[str] = None
+    neighbourhood: Optional[str] = None
+    suburb: Optional[str] = None
+    county: Optional[str] = None
+    country_code: Optional[str] = None
+    timezone: Optional[str] = None
+
+    class Config:
+        json_encoders = {
+            datetime: lambda dt: dt.isoformat(),
+        }
+
+
+class Geocoder:
+    def __init__(self, named_locs: Optional[Union[str, Path]] = None, cache_file: Union[str, Path] = 'timezone_cache.json'):
         self.tf = TimezoneFinder()
         self.srtm_data = get_data()
+        self.named_locs = Path(named_locs) if named_locs else None
+        self.override_locations = self.load_override_locations()
+        self.cache_file = Path(cache_file)
+        self.last_timezone: str = "America/Los_Angeles"
+        self.last_update: Optional[datetime] = None
+        self.last_location: Optional[Tuple[float, float]] = None
+        self.executor = ThreadPoolExecutor()
 
-    def get_location(self, lat, lon):
-        result = rg.search((lat, lon))
-        return result[0]['name'], result[0]['admin1'], result[0]['cc']
+    async def location(self, lat: float, lon: float):
+        loop = asyncio.get_running_loop()
+        return await loop.run_in_executor(self.executor, rg.search, [(lat, lon)])
 
-    def get_elevation(self, lat, lon):
-        return self.srtm_data.get_elevation(lat, lon)
+    async def elevation(self, latitude: float, longitude: float, unit: str = "m") -> float:
+        loop = asyncio.get_running_loop()
+        elevation = await loop.run_in_executor(self.executor, self.srtm_data.get_elevation, latitude, longitude)
+        
+        if unit == "m":
+            return elevation
+        elif unit == "km":
+            return elevation / 1000
+        elif unit == "ft" or unit == "'":
+            return elevation * 3.280839895
+        else:
+            raise ValueError(f"Unsupported unit: {unit}")
 
-    def get_timezone(self, lat, lon):
-        return self.tf.timezone_at(lat=lat, lng=lon)
+    async def timezone(self, lat: float, lon: float):
+        loop = asyncio.get_running_loop()
+        # timezone_at() takes keyword-only arguments, so wrap it in a lambda
+        timezone = await loop.run_in_executor(self.executor, lambda: self.tf.timezone_at(lat=lat, lng=lon))
+        return timezone if timezone else 'Unknown'
 
-    def lookup(self, lat, lon):
-        city, state, country = self.get_location(lat, lon)
-        elevation = self.get_elevation(lat, lon)
-        timezone = self.get_timezone(lat, lon)
+    async def lookup(self, lat: float, lon: float):
+        result = (await self.location(lat, lon))[0]
+        city, state, country = result['name'], result['admin1'], result['cc']
+        elevation = await self.elevation(lat, lon)
+        timezone = await self.timezone(lat, lon)
         
         return {
             "city": city,
@@ -58,6 +92,179 @@ class PyGeolocator:
             "timezone": timezone
         }
 
+    async def code(self, locations: Union[Location, Tuple[float, float], List[Union[Location, Tuple[float, float]]]]) -> Union[Location, List[Location]]:
+        if isinstance(locations, (Location, tuple)):
+            locations = [locations]
+        
+        processed_locations = []
+        for loc in locations:
+            if isinstance(loc, tuple):
+                processed_locations.append(Location(latitude=loc[0], longitude=loc[1], datetime=datetime.now(timezone.utc)))
+            elif isinstance(loc, Location):
+                processed_locations.append(loc)
+            else:
+                raise ValueError(f"Unsupported location type: {type(loc)}")
+
+        coordinates = [(location.latitude, location.longitude) for location in processed_locations]
+        
+        # rg.search accepts a list of (lat, lon) tuples and returns one result per coordinate
+        loop = asyncio.get_running_loop()
+        geocode_results = await loop.run_in_executor(self.executor, rg.search, coordinates)
+        elevations = await asyncio.gather(*[self.elevation(lat, lon) for lat, lon in coordinates])
+        timezones = await asyncio.gather(*[self.timezone(lat, lon) for lat, lon in coordinates])
+
+        geocoded_locations = []
+        for location, result, elevation, tz_name in zip(processed_locations, geocode_results, elevations, timezones):
+            geocoded_location = Location(
+                latitude=location.latitude,
+                longitude=location.longitude,
+                elevation=elevation,
+                datetime=location.datetime or datetime.now(timezone.utc),
+                county=result.get("admin2"),
+                city=result.get("name"),
+                state=result.get("admin1"),
+                country=result.get("cc"),
+                context=location.context or {},
+                name=result.get("name"),
+                display_name=f"{result.get('name')}, {result.get('admin1')}, {result.get('cc')}",
+                country_code=result.get("cc"),
+                timezone=tz_name
+            )
+
+            # Merge original location data with geocoded data
+            for field in location.__fields__:
+                if getattr(location, field) is None:
+                    setattr(location, field, getattr(geocoded_location, field))
+
+            geocoded_locations.append(location)
+
+        return geocoded_locations[0] if len(geocoded_locations) == 1 else geocoded_locations
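+
+    # A minimal usage sketch (hypothetical coordinates, inside an async context):
+    #
+    #     geo = Geocoder(NAMED_LOCATIONS, TZ_CACHE)
+    #     one = await geo.code((45.5152, -122.6784))        # tuple -> single Location
+    #     many = await geo.code([(45.5152, -122.6784),
+    #                            (34.0522, -118.2437)])     # list -> List[Location]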
+
+    async def geocode_osm(self, latitude: float, longitude: float, email: str):
+        url = f"https://nominatim.openstreetmap.org/reverse?format=json&lat={latitude}&lon={longitude}"
+        headers = {
+            'User-Agent': f'sijapi/1.0 ({email})',  # replace with your app name and email
+        }
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url, headers=headers) as response:
+                response.raise_for_status()
+                data = await response.json()
+        
+        address = data.get("address", {})
+        elevation = await self.elevation(latitude, longitude)
+        return Location(
+            latitude=latitude,
+            longitude=longitude,
+            elevation=elevation,
+            datetime=datetime.now(timezone.utc),
+            zip=address.get("postcode"),
+            street=address.get("road"),
+            city=address.get("city"),
+            state=address.get("state"),
+            country=address.get("country"),
+            context={}, 
+            class_=data.get("class"),
+            type=data.get("type"),
+            name=data.get("name"),
+            display_name=data.get("display_name"),
+            amenity=address.get("amenity"),
+            house_number=address.get("house_number"),
+            road=address.get("road"),
+            quarter=address.get("quarter"),
+            neighbourhood=address.get("neighbourhood"),
+            suburb=address.get("suburb"),
+            county=address.get("county"),
+            country_code=address.get("country_code"),
+            timezone=await self.timezone(latitude, longitude)
+        )
+
+    def load_override_locations(self):
+        if self.named_locs and self.named_locs.exists():
+            with open(self.named_locs, 'r') as file:
+                return yaml.safe_load(file)
+        return []
+
+    def haversine(self, lat1, lon1, lat2, lon2):
+        R = 6371  # Earth's radius in kilometers
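+        # e.g. haversine(45.0, -122.0, 45.1, -122.1) ≈ 13.6 km: ~11.1 km of
+        # latitude combined with ~7.9 km of longitude at the 45th parallel.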
+
+        lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
+        dlat = lat2 - lat1
+        dlon = lon2 - lon1
+
+        a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
+        c = 2 * atan2(sqrt(a), sqrt(1-a))
+
+        return R * c
+
+    async def find_override_location(self, lat: float, lon: float) -> Optional[str]:
+        closest_location = None
+        closest_distance = float('inf')
+        
+        for location in self.override_locations:
+            loc_name = location.get("name")
+            loc_lat = location.get("latitude")
+            loc_lon = location.get("longitude")
+            loc_radius = location.get("radius")
+            
+            distance = self.haversine(lat, lon, loc_lat, loc_lon)
+            
+            if distance <= loc_radius:
+                if distance < closest_distance:
+                    closest_distance = distance
+                    closest_location = loc_name
+        
+        return closest_location
+
+    async def refresh_timezone(self, location: Union[Location, Tuple[float, float]], force: bool = False) -> str:
+        if isinstance(location, Location):
+            lat, lon = location.latitude, location.longitude
+        else:
+            lat, lon = location
+
+        current_time = datetime.now()
+        if (force or
+            not self.last_update or
+            current_time - self.last_update > timedelta(hours=1) or
+            self.last_location != (lat, lon)):
+            new_timezone = await self.timezone(lat, lon)
+            self.last_timezone = new_timezone
+            self.last_update = current_time
+            self.last_location = (lat, lon)
+            await self.tz_save()
+        return self.last_timezone
+
+    async def tz_save(self):
+        cache_data = {
+            'last_timezone': self.last_timezone,
+            'last_update': self.last_update.isoformat() if self.last_update else None,
+            'last_location': self.last_location
+        }
+        async with aiofiles.open(self.cache_file, 'w') as f:
+            await f.write(json.dumps(cache_data))
+
+    async def tz_cached(self):
+        try:
+            async with aiofiles.open(self.cache_file, 'r') as f:
+                cache_data = json.loads(await f.read())
+            self.last_timezone = cache_data.get('last_timezone')
+            self.last_update = datetime.fromisoformat(cache_data['last_update']) if cache_data.get('last_update') else None
+            self.last_location = tuple(cache_data['last_location']) if cache_data.get('last_location') else None
+        except (FileNotFoundError, json.JSONDecodeError):
+            # If file doesn't exist or is invalid, we'll start fresh
+            pass
+
+    async def tz_current(self, location: Union[Location, Tuple[float, float]]) -> str:
+        await self.tz_cached()
+        return await self.refresh_timezone(location)
+
+    async def tz_last(self) -> Optional[str]:
+        await self.tz_cached()
+        return self.last_timezone
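+
+    # Cache flow (sketch): tz_current() re-reads the JSON cache, then
+    # refresh_timezone() consults TimezoneFinder only when the cached value is
+    # missing, more than an hour old, or recorded for different coordinates.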
+
+    def __del__(self):
+        self.executor.shutdown()
+
+
 class Database(BaseModel):
     host: str = Field(..., description="Database host")
     port: int = Field(5432, description="Database port")
@@ -98,15 +305,6 @@ class Database(BaseModel):
         return self.dict(exclude_none=True)
 
 
-class AutoResponder(BaseModel):
-    name: str
-    style: str
-    context: str
-    ollama_model: str = "llama3"
-    whitelist: List[str]
-    blacklist: List[str]
-    image_prompt: Optional[str] = None
-
 class IMAPConfig(BaseModel):
     username: str
     password: str
@@ -121,6 +319,16 @@ class SMTPConfig(BaseModel):
     port: int
     encryption: str = None
 
+class AutoResponder(BaseModel):
+    name: str
+    style: str
+    context: str
+    ollama_model: str = "llama3"
+    whitelist: List[str]
+    blacklist: List[str]
+    image_prompt: Optional[str] = None
+    smtp: SMTPConfig
+
 class EmailAccount(BaseModel):
     name: str
     refresh: int
@@ -129,7 +337,6 @@ class EmailAccount(BaseModel):
     summarize: bool = False
     podcast: bool = False
     imap: IMAPConfig
-    smtp: SMTPConfig
     autoresponders: Optional[List[AutoResponder]]
 
 class EmailContact(BaseModel):
@@ -143,95 +350,3 @@ class IncomingEmail(BaseModel):
     subject: str
     body: str
     attachments: List[dict] = []
-
-
-class Location(BaseModel):
-    latitude: float
-    longitude: float
-    datetime: datetime
-    elevation: Optional[float] = None
-    altitude: Optional[float] = None
-    zip: Optional[str] = None
-    street: Optional[str] = None
-    city: Optional[str] = None
-    state: Optional[str] = None
-    country: Optional[str] = None
-    context: Optional[Dict[str, Any]] = None 
-    class_: Optional[str] = None
-    type: Optional[str] = None
-    name: Optional[str] = None
-    display_name: Optional[str] = None
-    boundingbox: Optional[List[str]] = None
-    amenity: Optional[str] = None
-    house_number: Optional[str] = None
-    road: Optional[str] = None
-    quarter: Optional[str] = None
-    neighbourhood: Optional[str] = None
-    suburb: Optional[str] = None
-    county: Optional[str] = None
-    country_code: Optional[str] = None
-
-    class Config:
-        json_encoders = {
-            datetime: lambda dt: dt.isoformat(),
-        }
-
-
-
-class TimezoneTracker:
-    def __init__(self, cache_file: Union[str, Path] = 'timezone_cache.json'):
-        self.cache_file = Path(cache_file)
-        self.last_timezone: str = "America/Los_Angeles"
-        self.last_update: Optional[datetime] = None
-        self.last_location: Optional[Tuple[float, float]] = None
-        self.tf = TimezoneFinder()
-
-    def find(self, lat: float, lon: float) -> str:
-        timezone = self.tf.timezone_at(lat=lat, lng=lon)
-        return timezone if timezone else 'Unknown'
-
-    async def refresh(self, location: Union[Location, Tuple[float, float]], force: bool = False) -> str:
-        if isinstance(location, Location):
-            lat, lon = location.latitude, location.longitude
-        else:
-            lat, lon = location
-
-        current_time = datetime.now()
-        if (force or
-            not self.last_update or
-            current_time - self.last_update > timedelta(hours=1) or
-            self.last_location != (lat, lon)):
-            new_timezone = self.find(lat, lon)
-            self.last_timezone = new_timezone
-            self.last_update = current_time
-            self.last_location = (lat, lon)
-            await self.save_to_cache()
-        return self.last_timezone
-
-    async def save_to_cache(self):
-        cache_data = {
-            'last_timezone': self.last_timezone,
-            'last_update': self.last_update.isoformat() if self.last_update else None,
-            'last_location': self.last_location
-        }
-        with self.cache_file.open('w') as f:
-            json.dump(cache_data, f)
-
-    async def load_from_cache(self):
-        try:
-            with self.cache_file.open('r') as f:
-                cache_data = json.load(f)
-            self.last_timezone = cache_data.get('last_timezone')
-            self.last_update = datetime.fromisoformat(cache_data['last_update']) if cache_data.get('last_update') else None
-            self.last_location = tuple(cache_data['last_location']) if cache_data.get('last_location') else None
-        except (FileNotFoundError, json.JSONDecodeError):
-            # If file doesn't exist or is invalid, we'll start fresh
-            pass
-
-    async def get_current(self, location: Union[Location, Tuple[float, float]]) -> str:
-        await self.load_from_cache()
-        return await self.refresh(location)
-
-    async def get_last(self) -> Optional[str]:
-        await self.load_from_cache()
-        return self.last_timezone
diff --git a/sijapi/config/.env-example b/sijapi/config/.env-example
index 5a23da9..3ef3678 100644
--- a/sijapi/config/.env-example
+++ b/sijapi/config/.env-example
@@ -96,7 +96,7 @@ TRUSTED_SUBNETS=127.0.0.1/32,10.13.37.0/24,100.64.64.0/24
 #                                 ──────────                                       
 #                                                                                  
 #─── router selection: ────────────────────────────────────────────────────────────
-ROUTERS=asr,calendar,cf,email,health,llm,locate,note,rag,sd,serve,time,tts,weather
+ROUTERS=asr,cal,cf,email,health,llm,loc,note,rag,sd,serve,time,tts,weather
 UNLOADED=ig
 #─── notes: ──────────────────────────────────────────────────────────────────────
 #                                                                                  
@@ -115,7 +115,7 @@ UNLOADED=ig
 #         asr:  requires faster_whisper — $ pip install faster_whisper — and      
 #               downloading the model file specified in ASR_DEFAULT_MODEL.        
 #                                                                                                          
-#    calendar:  requires (1) a Microsoft 365 account with a properly configured   
+#         cal:  requires (1) a Microsoft 365 account with a properly configured   
 #               Azure Active Directory app, and/or (2) Calendars on macOS. 
 #                                                                                  
 #          cf:  interfaces with the Cloudflare API and Caddy to register new   
@@ -138,7 +138,7 @@ UNLOADED=ig
 #              configured separately in the ig_config.json file; relies heavily
 #              on the llm and sd routers which have their own dependencies.   
 #                                                                                  
-#     locate:  some endpoints work as is, but the core location tracking 
+#        loc:  some endpoints work as is, but the core location tracking 
 #              functionality requires Postgresql + PostGIS extension and are
 #              designed specifically to pair with a mobile device where 
 #              Pythonista is installed and configured to run the
@@ -148,8 +148,8 @@ UNLOADED=ig
 #       note:  designed for use with Obsidian plus the Daily Notes and Tasks 
 #              core extensions; and the Admonitions, Banners, Icons (with the 
 #              Lucide pack), and Make.md community extensions. Moreover `notes`
-#              relies heavily on the calendar, llm, locate, sd, summarize, time, 
-#              tts, and weather routers and accordingly on the external 
+#              relies heavily on the cal, llm, loc, sd, summarize, time, 
+#              tts, and weather routers and accordingly on the external 
 #              dependencies of each.
 #                                                                                  
 #         sd:  requires ComfyUI plus any modules and StableDiffusion models 
@@ -165,7 +165,7 @@ UNLOADED=ig
 #  
 #    weather:  requires a VisualCrossing API key and is designed for (but doesn't
 #              itself strictly require) Postgresql with the PostGIS extension; 
-#              (... but it presently relies on the locate router, which does).
+#              (... but it presently relies on the loc router, which does).
 #                                                                                    
 #                                                                                    
 #     ... Whew! that was a lot, right? I'm so glad we're in this together...         
@@ -217,7 +217,7 @@ TAILSCALE_API_KEY=¿SECRET?   # <--- enter your own TS API key
 #      ░▒      ░ ░ ░ ▒  ░  ░  ░    ░      ░ ░   ░   ░░   ░    ░   ░  ░  ░             
 #      ░░          ░ ░T̷ O̷ G̷ E̷ T̷ H̷ ░ R̷.        ░    ░        ░  ░      ░             
 #                                           J U S T              ░             
-#─── frag, or weat,and locate modules:──  H O L D   M Y   H A N D.
+#─── frag, or weat,and loc modules:──────  H O L D   M Y   H A N D.
 DB_NAME=db                                                                      
 #                                                              
 DB_HOST=127.0.0.1                                                                   
@@ -237,12 +237,12 @@ DB_SSH_PASS=¿SECRET?          # <--- enter SSH password for pg server (if not l
 #   variables allow database access over an SSH tunnel.                             
 #                                                                                   
 #   In the current implementation, we rely on Postgres to hold:                     
-#       i.  user-logged location data (locate module), and                          
+#       i.  user-logged location data (loc module), and                          
 #      ii.  results from past weather forecast checks (weather module).             
 #                                                                                     
 #   A future version will hopefully make use of PostGIS's geocoding capabilities,     
 #   and add a vector database for the LLM module. Until then it's up to you if the    
-#   locate and weather modules are worth the hassle of maintaining Postgres.          
+#   loc and weather modules are worth the hassle of maintaining Postgres.          
 #                                   ──────────                                        
 #                                                                                     
 #───────────────────────────────   𝐼   𝐵 𝐸 𝑇   𝑌 𝑂 𝑈   ─────────────────────────────────
diff --git a/sijapi/routers/cal.py b/sijapi/routers/cal.py
new file mode 100644
index 0000000..7cca17a
--- /dev/null
+++ b/sijapi/routers/cal.py
@@ -0,0 +1,414 @@
+'''
+Calendar module using macOS Calendars and/or Microsoft 365 via its Graph API.
+Depends on: 
+  LOGGER, ICAL_TOGGLE, ICALENDARS, MS365_TOGGLE, MS365_CLIENT_ID, MS365_SECRET, MS365_AUTHORITY_URL, MS365_SCOPE, MS365_REDIRECT_PATH, MS365_TOKEN_PATH
+'''
+from fastapi import APIRouter, Depends, HTTPException, status, Request
+from fastapi.responses import RedirectResponse, JSONResponse
+from fastapi.security import OAuth2PasswordBearer
+import httpx
+import json
+import os
+import time
+from dateutil.parser import isoparse as parse_iso
+import threading
+from typing import Dict, List, Any
+from datetime import datetime, timedelta
+from Foundation import NSDate, NSRunLoop
+import EventKit as EK
+from sijapi import L, ICAL_TOGGLE, ICALENDARS, MS365_TOGGLE, MS365_CLIENT_ID, MS365_SECRET, MS365_AUTHORITY_URL, MS365_SCOPE, MS365_REDIRECT_PATH, MS365_TOKEN_PATH
+from sijapi.routers import loc
+
+cal = APIRouter()
+oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/token")
+timeout = httpx.Timeout(12)
+
+if MS365_TOGGLE is True:
+    L.CRIT(f"Visit https://api.sij.ai/o365/login to obtain your Microsoft 365 authentication token.")
+
+    @cal.get("/o365/login")
+    async def login():
+        L.DEBUG(f"Received request to /o365/login")
+        L.DEBUG(f"SCOPE: {MS365_SCOPE}")
+        if not MS365_SCOPE:
+            L.ERR("No scopes defined for authorization.")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="No scopes defined for authorization."
+            )
+        authorization_url = f"{MS365_AUTHORITY_URL}/oauth2/v2.0/authorize?client_id={MS365_CLIENT_ID}&response_type=code&redirect_uri={MS365_REDIRECT_PATH}&scope={'+'.join(MS365_SCOPE)}"
+        L.INFO(f"Redirecting to authorization URL: {authorization_url}")
+        return RedirectResponse(authorization_url)
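+
+    # Assumed flow: a browser visits /o365/login, the user consents on
+    # Microsoft's page, and Microsoft redirects back to MS365_REDIRECT_PATH
+    # with ?code=..., which the /o365/oauth_redirect endpoint below exchanges
+    # for an access/refresh token pair.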
+
+    @cal.get("/o365/oauth_redirect")
+    async def oauth_redirect(code: str = None, error: str = None):
+        L.DEBUG(f"Received request to /o365/oauth_redirect")
+        if error:
+            L.ERR(f"OAuth2 Error: {error}")
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST, detail="OAuth2 Error"
+            )
+        L.INFO(f"Requesting token with authorization code: {code}")
+        token_url = f"{MS365_AUTHORITY_URL}/oauth2/v2.0/token"
+        data = {
+            "client_id": MS365_CLIENT_ID,
+            "client_secret": MS365_SECRET,
+            "code": code,
+            "redirect_uri": MS365_REDIRECT_PATH,
+            "grant_type": "authorization_code"
+        }
+        async with httpx.AsyncClient(timeout=timeout) as client:
+            response = await client.post(token_url, data=data)
+        L.DEBUG(f"Token endpoint response status code: {response.status_code}")
+        L.INFO(f"Token endpoint response text: {response.text}")
+        result = response.json()
+        if 'access_token' in result:
+            await save_token(result)
+            L.INFO("Access token obtained successfully")
+            return {"message": "Access token stored successfully"}
+        else:
+            L.CRIT(f"Failed to obtain access token. Response: {result}")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="Failed to obtain access token"
+            )
+
+    @cal.get("/o365/me")
+    async def read_items():
+        L.DEBUG(f"Received request to /o365/me")
+        token = await load_token()
+        if not token:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Access token not found",
+            )
+        graph_url = "https://graph.microsoft.com/v1.0/me"
+        headers = {"Authorization": f"Bearer {token['access_token']}"}
+        async with httpx.AsyncClient(timeout=timeout) as client:
+            response = await client.get(graph_url, headers=headers)
+        if response.status_code == 200:
+            user = response.json()
+            L.INFO(f"User retrieved: {user}")
+            return user
+        else:
+            L.ERR("Invalid or expired token")
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Invalid or expired token",
+                headers={"WWW-Authenticate": "Bearer"},
+            )
+        
+    async def save_token(token):
+        L.DEBUG(f"Saving token: {token}")
+        try:
+            token["expires_at"] = int(time.time()) + token["expires_in"]
+            with open(MS365_TOKEN_PATH, "w") as file:
+                json.dump(token, file)
+                L.DEBUG(f"Saved token to {MS365_TOKEN_PATH}")
+        except Exception as e:
+            L.ERR(f"Failed to save token: {e}")
+
+    async def load_token():
+        if os.path.exists(MS365_TOKEN_PATH):
+            try:
+                with open(MS365_TOKEN_PATH, "r") as file:
+                    token = json.load(file)
+            except FileNotFoundError:
+                L.ERR("Token file not found.")
+                return None
+            except json.JSONDecodeError:
+                L.ERR("Failed to decode token JSON")
+                return None
+            
+            if token:
+                L.DEBUG(f"Loaded token: {token}")
+                return token
+            else:
+                L.DEBUG("No token found.")
+                return None
+        else:
+            L.ERR(f"No file found at {MS365_TOKEN_PATH}")
+            return None
+
+
+    async def is_token_expired(token):
+        if "expires_at" not in token:
+            return True  # Treat missing expiration time as expired token
+        expiry_time = datetime.fromtimestamp(token["expires_at"])
+        return expiry_time <= datetime.now()
+
+    async def is_token_expired2(token):
+        graph_url = "https://graph.microsoft.com/v1.0/me"
+        headers = {"Authorization": f"Bearer {token}"}
+        async with httpx.AsyncClient(timeout=timeout) as client:
+            response = await client.get(graph_url, headers=headers)
+        return response.status_code == 401
+
+    async def get_new_token_with_refresh_token(refresh_token):
+        token_url = f"{MS365_AUTHORITY_URL}/oauth2/v2.0/token"
+        data = {
+            "client_id": MS365_CLIENT_ID,
+            "client_secret": MS365_SECRET,
+            "refresh_token": refresh_token,
+            "grant_type": "refresh_token",
+            "scope": " ".join(MS365_SCOPE),
+        }
+        async with httpx.AsyncClient(timeout=timeout) as client:
+            response = await client.post(token_url, data=data)
+        result = response.json()
+        if "access_token" in result:
+            L.INFO("Access token refreshed successfully")
+            return result
+        else:
+            L.ERR("Failed to refresh access token")
+            return None
+
+
+    async def refresh_token():
+        token = await load_token()
+        if not token:
+            L.ERR("No token found in storage")
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="No token found",
+            )
+
+        if 'refresh_token' not in token:
+            L.ERR("Refresh token not found in the loaded token")
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Refresh token not found",
+            )
+
+        stored_refresh_token = token['refresh_token']
+        L.DEBUG("Found refresh token, attempting to refresh access token")
+
+        new_token = await get_new_token_with_refresh_token(stored_refresh_token)
+        
+        if new_token:
+            await save_token(new_token)
+            L.INFO("Token refreshed and saved successfully")
+        else:
+            L.ERR("Failed to refresh token")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="Failed to refresh token",
+            )
+
+
+def get_calendar_ids() -> Dict[str, str]:
+    event_store = EK.EKEventStore.alloc().init()
+    all_calendars = event_store.calendarsForEntityType_(0)  # 0 corresponds to EKEntityTypeEvent
+
+    calendar_identifiers = {
+        calendar.title() : calendar.calendarIdentifier() for calendar in all_calendars
+    }
+    L.DEBUG(f"{calendar_identifiers}")
+    return calendar_identifiers
+
+# Helper to convert datetime to NSDate
+def datetime_to_nsdate(dt: datetime) -> NSDate:
+    return NSDate.dateWithTimeIntervalSince1970_(dt.timestamp())
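+# e.g. datetime_to_nsdate(datetime(2024, 1, 1)) yields the NSDate for
+# 2024-01-01 00:00:00 local time (a naive datetime resolves via .timestamp()).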
+
+
+@cal.get("/events")
+async def get_events_endpoint(start_date: str, end_date: str):
+    start_dt = await loc.dt(start_date)
+    end_dt = await loc.dt(end_date)
+    response = await get_events(start_dt, end_dt)
+    return JSONResponse(content=response, status_code=200)
+
+async def get_events(start_dt: datetime, end_dt: datetime) -> List:
+    combined_events = []
+    if MS365_TOGGLE:
+        ms_events = await get_ms365_events(start_dt, end_dt)
+        combined_events.extend(ms_events)
+    
+    if ICAL_TOGGLE:
+        calendar_ids = ICALENDARS  
+        macos_events = get_macos_calendar_events(start_dt, end_dt, calendar_ids)
+        combined_events.extend(macos_events)
+    
+    parsed_events = await parse_calendar_for_day(start_dt, end_dt, combined_events)
+    return parsed_events
+
+
+def get_macos_calendar_events(start_date: datetime, end_date: datetime, calendar_ids: List[str] = None) -> List[Dict]:
+    event_store = EK.EKEventStore.alloc().init()
+
+    # Request access to EventKit
+    def request_access() -> bool:
+        access_granted = []
+
+        def completion_handler(granted, error):
+            if error is not None:
+                L.ERR(f"Error: {error}")
+            access_granted.append(granted)
+            # Notify the main thread that the completion handler has executed
+            with access_granted_condition:
+                access_granted_condition.notify()
+
+        access_granted_condition = threading.Condition()
+        with access_granted_condition:
+            event_store.requestAccessToEntityType_completion_(0, completion_handler)  # 0 corresponds to EKEntityTypeEvent
+            # Wait for the completion handler to be called
+            access_granted_condition.wait(timeout=10)
+            # Verify that the handler was called and access_granted is not empty
+            if access_granted:
+                return access_granted[0]
+            else:
+                L.ERR("Request access timed out or failed")
+                return False
+
+    if not request_access():
+        L.ERR("Access to calendar data was not granted")
+        return []
+
+    ns_start_date = datetime_to_nsdate(start_date)
+    ns_end_date = datetime_to_nsdate(end_date)
+
+    # Retrieve all calendars
+    all_calendars = event_store.calendarsForEntityType_(0)  # 0 corresponds to EKEntityTypeEvent
+    if calendar_ids:
+        selected_calendars = [cal for cal in all_calendars if cal.calendarIdentifier() in calendar_ids]
+    else:
+        selected_calendars = all_calendars
+
+    # Filtering events by selected calendars
+    predicate = event_store.predicateForEventsWithStartDate_endDate_calendars_(ns_start_date, ns_end_date, selected_calendars)
+    events = event_store.eventsMatchingPredicate_(predicate)
+
+    event_list = []
+    for event in events:
+        # Check if event.attendees() returns None
+        if event.attendees():
+            attendees = [{'name': att.name(), 'email': att.emailAddress()} for att in event.attendees() if att.emailAddress()]
+        else:
+            attendees = []
+
+        # Format the start and end dates properly
+        start_date_str = event.startDate().descriptionWithLocale_(None)
+        end_date_str = event.endDate().descriptionWithLocale_(None)
+
+        event_data = {
+            "subject": event.title(),
+            "id": event.eventIdentifier(),
+            "start": start_date_str,
+            "end": end_date_str,
+            "bodyPreview": event.notes() if event.notes() else '',
+            "attendees": attendees,
+            "location": event.location() if event.location() else '',
+            "onlineMeetingUrl": '',  # Defaulting to empty as macOS EventKit does not provide this
+            "showAs": 'busy',  # Default to 'busy'
+            "isAllDay": event.isAllDay()
+        }
+
+        event_list.append(event_data)
+
+    return event_list
+
+async def get_ms365_events(start_date: datetime, end_date: datetime):
+    token = await load_token()
+    if token:
+        if await is_token_expired(token):
+            await refresh_token()
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Access token not found",
+        )
+    # TODO: this should eventually use tz-aware datetimes converted to UTC;
+    # for now, format the dates explicitly so the $filter value is valid ISO 8601
+    graph_url = f"https://graph.microsoft.com/v1.0/me/events?$filter=start/dateTime ge '{start_date.strftime('%Y-%m-%d')}T00:00:00' and end/dateTime le '{end_date.strftime('%Y-%m-%d')}T23:59:59'"
+    headers = {
+        "Authorization": f"Bearer {token['access_token']}",
+        "Prefer": 'outlook.timezone="Pacific Standard Time"',
+    }
+    async with httpx.AsyncClient() as client:
+        response = await client.get(graph_url, headers=headers)
+
+    if response.status_code != 200:
+        L.ERR("Failed to retrieve events from Microsoft 365")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail="Failed to retrieve events",
+        )
+
+    ms_events = response.json().get("value", [])
+    return ms_events
+
+
+async def parse_calendar_for_day(range_start: datetime, range_end: datetime, events: List[Dict[str, Any]]):
+    range_start = await loc.dt(range_start)
+    range_end = await loc.dt(range_end)
+    event_list = []
+
+    for event in events:
+        L.INFO(f"Event: {event}")
+        start_str = event.get('start')
+        end_str = event.get('end')
+
+        if isinstance(start_str, dict):
+            start_str = start_str.get('dateTime')
+        else:
+            L.INFO(f"Start date string not a dict")
+
+        if isinstance(end_str, dict):
+            end_str = end_str.get('dateTime')
+        else:
+            L.INFO(f"End date string not a dict")
+
+        try:
+            start_date = await loc.dt(start_str) if start_str else None
+        except (ValueError, TypeError) as e:
+            L.ERR(f"Invalid start date format: {start_str}, error: {e}")
+            continue
+
+        try:
+            end_date = await loc.dt(end_str) if end_str else None
+        except (ValueError, TypeError) as e:
+            L.ERR(f"Invalid end date format: {end_str}, error: {e}")
+            continue
+
+        L.DEBUG(f"Comparing {start_date} with range {range_start} to {range_end}")
+
+        if start_date:
+            # Ensure start_date is timezone-aware
+            start_date = await loc.dt(start_date)
+            
+            # If end_date is not provided, assume it's the same as start_date
+            if not end_date:
+                end_date = start_date
+            else:
+                end_date = await loc.dt(end_date)
+            
+            # Check if the event overlaps with the given range
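+            # (two intervals overlap iff each one starts before the other ends)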
+            if (start_date < range_end) and (end_date > range_start):
+                attendees = [{'name': att['name'], 'email': att['email']} for att in event.get('attendees', []) if 'name' in att and 'email' in att]
+                location = event.get('location', '')
+                if isinstance(location, dict):
+                    location = location.get('displayName', '')
+                
+                event_data = {
+                    "name": event.get('subject', ''),
+                    "uid": event.get('id', ''),
+                    "start": start_date.strftime('%H:%M'),
+                    "end": end_date.strftime('%H:%M') if end_date else '',
+                    "description": event.get('bodyPreview', ''),
+                    "attendees": attendees,
+                    "location": location,
+                    "url": event.get('onlineMeetingUrl', ''),
+                    "busystatus": event.get('showAs', ''),
+                    "busy": event.get('showAs', '') in ['busy', 'tentative'],
+                    "all_day": event.get('isAllDay', False)
+                }
+                L.INFO(f"Event_data: {event_data}")
+                event_list.append(event_data)
+            else:
+                L.DEBUG(f"Event outside of specified range: {start_date} to {end_date}")
+        else:
+            L.ERR(f"Invalid or missing start date for event: {event.get('id', 'Unknown ID')}")
+
+    return event_list
\ No newline at end of file
diff --git a/sijapi/routers/email.py b/sijapi/routers/email.py
index f8f5ea9..7c066b0 100644
--- a/sijapi/routers/email.py
+++ b/sijapi/routers/email.py
@@ -3,6 +3,7 @@ Uses IMAP and SMTP login credentials to monitor an inbox and summarize incoming
 '''
 from fastapi import APIRouter
 import asyncio
+import aiofiles
 from imbox import Imbox
 from bs4 import BeautifulSoup
 import os
@@ -19,35 +20,20 @@ import yaml
 from typing import List, Dict, Optional, Set
 from datetime import datetime as dt_datetime
 from sijapi import L, PODCAST_DIR, DEFAULT_VOICE, EMAIL_CONFIG, EMAIL_LOGS
-from sijapi.routers import tts, llm, sd, locate
+from sijapi.routers import loc, tts, llm, sd
 from sijapi.utilities import clean_text, assemble_journal_path, extract_text, prefix_lines
 from sijapi.classes import EmailAccount, IMAPConfig, SMTPConfig, IncomingEmail, EmailContact, AutoResponder
 from sijapi.classes import EmailAccount
 
 email = APIRouter(tags=["private"])
 
+
 def load_email_accounts(yaml_path: str) -> List[EmailAccount]:
     with open(yaml_path, 'r') as file:
         config = yaml.safe_load(file)
     return [EmailAccount(**account) for account in config['accounts']]
 
 
-def get_account_by_email(this_email: str) -> Optional[EmailAccount]:
-    email_accounts = load_email_accounts(EMAIL_CONFIG)
-    for account in email_accounts:
-        if account.imap.username.lower() == this_email.lower():
-            return account
-    return None
-
-def get_imap_details(this_email: str) -> Optional[IMAPConfig]:
-    account = get_account_by_email(this_email)
-    return account.imap if account else None
-
-def get_smtp_details(this_email: str) -> Optional[SMTPConfig]:
-    account = get_account_by_email(this_email)
-    return account.smtp if account else None
-
-
 def get_imap_connection(account: EmailAccount):
     return Imbox(account.imap.host,
         username=account.imap.username,
@@ -56,79 +42,17 @@ def get_imap_connection(account: EmailAccount):
         ssl=account.imap.encryption == 'SSL',
         starttls=account.imap.encryption == 'STARTTLS')
 
-def get_smtp_connection(account: EmailAccount):
+def get_smtp_connection(autoresponder: AutoResponder):
     context = ssl._create_unverified_context()
     
-    if account.smtp.encryption == 'SSL':
-        return SMTP_SSL(account.smtp.host, account.smtp.port, context=context)
-    elif account.smtp.encryption == 'STARTTLS':
-        smtp = SMTP(account.smtp.host, account.smtp.port)
+    if autoresponder.smtp.encryption == 'SSL':
+        return SMTP_SSL(autoresponder.smtp.host, autoresponder.smtp.port, context=context)
+    elif autoresponder.smtp.encryption == 'STARTTLS':
+        smtp = SMTP(autoresponder.smtp.host, autoresponder.smtp.port)
         smtp.starttls(context=context)
         return smtp
     else:
-        return SMTP(account.smtp.host, account.smtp.port)
-
-
-def get_matching_autoresponders(this_email: IncomingEmail, account: EmailAccount) -> List[AutoResponder]:
-    L.DEBUG(f"Called get_matching_autoresponders for email \"{this_email.subject},\" account name \"{account.name}\"")
-    def matches_list(item: str, this_email: IncomingEmail) -> bool:
-        if '@' in item:
-            return item in this_email.sender
-        else:
-            return item.lower() in this_email.subject.lower() or item.lower() in this_email.body.lower()
-    matching_profiles = []
-    for profile in account.autoresponders:
-        whitelist_match = not profile.whitelist or any(matches_list(item, this_email) for item in profile.whitelist)
-        blacklist_match = any(matches_list(item, this_email) for item in profile.blacklist)
-        if whitelist_match and not blacklist_match:
-            L.DEBUG(f"We have a match for {whitelist_match} and no blacklist matches.")
-            matching_profiles.append(profile)
-        elif whitelist_match and blacklist_match:
-            L.DEBUG(f"Matched whitelist for {whitelist_match}, but also matched blacklist for {blacklist_match}")
-        else:
-            L.DEBUG(f"No whitelist or blacklist matches.")
-    return matching_profiles
-
-
-async def generate_auto_response_body(this_email: IncomingEmail, profile: AutoResponder, account: EmailAccount) -> str:
-    now = await locate.localize_datetime(dt_datetime.now())
-    then = await locate.localize_datetime(this_email.datetime_received)
-    age = now - then
-    usr_prompt = f'''
-    Generate a personalized auto-response to the following email:
-    From: {this_email.sender}
-    Sent: {age} ago
-    Subject: "{this_email.subject}"
-    Body:
-    {this_email.body}
-    Respond on behalf of {account.fullname}, who is unable to respond personally because {profile.context}.
-    Keep the response {profile.style} and to the point, but responsive to the sender's inquiry.
-    Do not mention or recite this context information in your response.
-    '''
-    sys_prompt = f"You are an AI assistant helping {account.fullname} with email responses. {account.fullname} is described as: {account.bio}"
-    try:
-        # async def query_ollama(usr: str, sys: str = LLM_SYS_MSG, model: str = DEFAULT_LLM, max_tokens: int = 200):
-        response = await llm.query_ollama(usr_prompt, sys_prompt, profile.ollama_model, 400)
-
-        L.DEBUG(f"query_ollama response: {response}")
-        
-        if isinstance(response, str):
-            response += "\n\n"
-            return response
-        elif isinstance(response, dict):
-            if "message" in response and "content" in response["message"]:
-                return response["message"]["content"]
-            else:
-                L.ERR(f"Unexpected response structure from query_ollama: {response}")
-        else:
-            L.ERR(f"Unexpected response type from query_ollama: {type(response)}")
-        
-        # If we reach here, we couldn't extract a valid response
-        raise ValueError("Could not extract valid response from query_ollama")
-        
-    except Exception as e:
-        L.ERR(f"Error generating auto-response: {str(e)}")
-        return f"Thank you for your email regarding '{this_email.subject}'. We are currently experiencing technical difficulties with our auto-response system. We will review your email and respond as soon as possible. We apologize for any inconvenience."
+        return SMTP(autoresponder.smtp.host, autoresponder.smtp.port)
 
 
 def clean_email_content(html_content):
@@ -156,119 +80,7 @@ async def extract_attachments(attachments) -> List[str]:
 
     return attachment_texts
 
-
-async def save_email(this_email: IncomingEmail, account: EmailAccount):
-    try:
-        md_path, md_relative = assemble_journal_path(this_email.datetime_received, "Emails", this_email.subject, ".md")
-        tts_path, tts_relative = assemble_journal_path(this_email.datetime_received, "Emails", this_email.subject, ".wav")
-        summary = ""
-        if account.summarize == True:
-            email_content = f'At {this_email.datetime_received}, {this_email.sender} sent an email with the subject line "{this_email.subject}". The email in its entirety reads: \n\n{this_email.body}\n"'
-            if this_email.attachments:
-                attachment_texts = await extract_attachments(this_email.attachments)
-                email_content += "\n—--\n" + "\n—--\n".join([f"Attachment: {text}" for text in attachment_texts])
-            summary = await llm.summarize_text(email_content)
-            await tts.local_tts(text_content = summary, speed = 1.1, voice = DEFAULT_VOICE, podcast = account.podcast, output_path = tts_path)
-            summary = prefix_lines(summary, '> ')
-        
-        # Create the markdown content
-        markdown_content = f'''---
-date: {this_email.datetime_received.strftime('%Y-%m-%d')}
-tags:
-- email
----
-|     |     |     | 
-| --: | :--: |  :--: | 
-|  *received* | **{this_email.datetime_received.strftime('%B %d, %Y at %H:%M:%S %Z')}**    |    |
-|  *from* | **[[{this_email.sender}]]**    |    |
-|  *to* | {', '.join([f'**[[{recipient.email}]]**' if not recipient.name else f'**[[{recipient.name}|{recipient.email}]]**' for recipient in this_email.recipients])}   |    |
-|  *subject* | **{this_email.subject}**    |    |
-'''
-    
-        if summary:
-            markdown_content += f'''
-> [!summary]  Summary
->  {summary}
-'''
-  
-        if tts_path.exists():
-            markdown_content += f'''
-![[{tts_path}]]
-'''
-            
-        markdown_content += f'''
----
-{this_email.body}
-'''
-            
-        with open(md_path, 'w', encoding='utf-8') as md_file:
-            md_file.write(markdown_content)
-
-        L.DEBUG(f"Saved markdown to {md_path}")
-
-        return True
-    
-    except Exception as e:
-        L.ERR(f"Exception: {e}")
-        return False
-
-async def autorespond(this_email: IncomingEmail, account: EmailAccount):
-    L.DEBUG(f"Evaluating {this_email.subject} for autoresponse-worthiness...")
-    matching_profiles = get_matching_autoresponders(this_email, account)
-    L.DEBUG(f"Matching profiles: {matching_profiles}")
-    for profile in matching_profiles:
-        L.INFO(f"Generating auto-response to {this_email.subject} with profile: {profile.name}")
-        auto_response_subject = f"Auto-Response Re: {this_email.subject}"
-        auto_response_body = await generate_auto_response_body(this_email, profile, account)
-        L.DEBUG(f"Auto-response: {auto_response_body}")
-        success = await send_auto_response(this_email.sender, auto_response_subject, auto_response_body, profile, account)
-        if success == True:
-            return True
-        
-    L.WARN(f"We were not able to successfully auto-respond to {this_email.subject}")
-    return False
-
-async def send_auto_response(to_email, subject, body, profile, account):
-    try:
-        message = MIMEMultipart()
-        message['From'] = account.smtp.username
-        message['To'] = to_email
-        message['Subject'] = subject
-        message.attach(MIMEText(body, 'plain'))
-
-        if profile.image_prompt:
-            jpg_path = await sd.workflow(profile.image_prompt, earlyout=False, downscale_to_fit=True)
-            if jpg_path and os.path.exists(jpg_path):
-                with open(jpg_path, 'rb') as img_file:
-                    img = MIMEImage(img_file.read(), name=os.path.basename(jpg_path))
-                    message.attach(img)
-
-        L.DEBUG(f"Sending auto-response {to_email} concerning {subject} from account {account.name}...")
-        with get_smtp_connection(account) as server:
-            server.login(account.smtp.username, account.smtp.password)
-            server.send_message(message)
-
-        L.INFO(f"Auto-response sent to {to_email} concerning {subject} from account {account.name}!")
-        return True
-
-    except Exception as e:
-        L.ERR(f"Error in preparing/sending auto-response from account {account.name}: {e}")
-        return False
-
-
-
-
-async def load_processed_uids(filename: Path) -> Set[str]:
-    if filename.exists():
-        with open(filename, 'r') as f:
-            return set(line.strip().split(':')[-1] for line in f)
-    return set()
-
-async def save_processed_uid(filename: Path, account_name: str, uid: str):
-    with open(filename, 'a') as f:
-        f.write(f"{account_name}:{uid}\n")
-
-async def process_account_summarization(account: EmailAccount):
+async def process_account_archival(account: EmailAccount):
     summarized_log = EMAIL_LOGS / account.name / "summarized.txt"
     os.makedirs(summarized_log.parent, exist_ok = True)
 
@@ -283,7 +95,7 @@ async def process_account_summarization(account: EmailAccount):
                     uid_str = uid.decode() if isinstance(uid, bytes) else str(uid)
                     if uid_str not in processed_uids:
                         recipients = [EmailContact(email=recipient['email'], name=recipient.get('name', '')) for recipient in message.sent_to]
-                        localized_datetime = await locate.localize_datetime(message.date)
+                        localized_datetime = await loc.dt(message.date)
                         this_email = IncomingEmail(
                             sender=message.sent_from[0]['email'],
                             datetime_received=localized_datetime,
@@ -292,15 +104,15 @@ async def process_account_summarization(account: EmailAccount):
                             body=clean_email_content(message.body['html'][0]) if message.body['html'] else clean_email_content(message.body['plain'][0]) or "",
                             attachments=message.attachments 
                         )
-                        if account.summarize:
-                            save_success = await save_email(this_email, account)
-                            if save_success:
-                                await save_processed_uid(summarized_log, account.name, uid_str)
-                                L.INFO(f"Summarized email: {uid_str}")
-                            else:
-                                L.WARN(f"Failed to summarize {this_email.subject}")
+                        md_path, md_relative = assemble_journal_path(this_email.datetime_received, "Emails", this_email.subject, ".md")
+                        md_summary = await summarize_single_email(this_email, account.podcast) if account.summarize else None
+                        md_content = await archive_single_email(this_email, md_summary)
+                        save_success = await save_email(md_path, md_content)
+                        if save_success:
+                            await save_processed_uid(summarized_log, account.name, uid_str)
+                            L.INFO(f"Summarized email: {uid_str}")
                         else:
-                            L.INFO(f"account.summarize shows as false.")
+                            L.WARN(f"Failed to summarize {this_email.subject}")
                     else:
                         L.DEBUG(f"Skipping {uid_str} because it was already processed.")
         except Exception as e:
@@ -308,51 +120,216 @@ async def process_account_summarization(account: EmailAccount):
         
         await asyncio.sleep(account.refresh)
 
+async def summarize_single_email(this_email: IncomingEmail, podcast: bool = False):
+    tts_path, tts_relative = assemble_journal_path(this_email.datetime_received, "Emails", this_email.subject, ".wav")
+    summary = ""
+    email_content = f'At {this_email.datetime_received}, {this_email.sender} sent an email with the subject line "{this_email.subject}". The email in its entirety reads: \n\n{this_email.body}\n"'
+    if this_email.attachments:
+        attachment_texts = await extract_attachments(this_email.attachments)
+        email_content += "\n—--\n" + "\n—--\n".join([f"Attachment: {text}" for text in attachment_texts])
+    summary = await llm.summarize_text(email_content)
+    await tts.local_tts(text_content = summary, speed = 1.1, voice = DEFAULT_VOICE, podcast = podcast, output_path = tts_path)
+    md_summary = '```ad-summary\n'
+    md_summary += f'title: {this_email.subject}\n'
+    md_summary += f'{summary}\n'
+    md_summary += '```\n\n'
+    md_summary += f'![[{tts_path}]]\n' if tts_path.exists() else ''
+
+    return md_summary
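+
+# With the Obsidian Admonitions plugin, the block built above renders roughly
+# as a "summary" callout titled with the email subject, followed by an
+# embedded player for the TTS audio file when one was generated.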
+
+async def archive_single_email(this_email: IncomingEmail, summary: str = None):
+    try:
+        markdown_content = f'''---
+date: {this_email.datetime_received.strftime('%Y-%m-%d')}
+tags:
+- email
+---
+|     |     |     | 
+| --: | :--: |  :--: | 
+|  *received* | **{this_email.datetime_received.strftime('%B %d, %Y at %H:%M:%S %Z')}**    |    |
+|  *from* | **[[{this_email.sender}]]**    |    |
+|  *to* | {', '.join([f'**[[{recipient.email}]]**' if not recipient.name else f'**[[{recipient.name}|{recipient.email}]]**' for recipient in this_email.recipients])}   |    |
+|  *subject* | **{this_email.subject}**    |    |
+'''
+    
+        if summary:
+            markdown_content += summary
+
+        markdown_content += f'''
+---
+{this_email.body}
+'''
+        return markdown_content
+    
+    except Exception as e:
+        L.ERR(f"Exception: {e}")
+        return False
+    
+async def save_email(md_path, md_content):
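+    # Note: synchronous write, unlike the aiofiles-based UID log helpers below.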
+    try:
+        with open(md_path, 'w', encoding='utf-8') as md_file:
+            md_file.write(md_content)
+
+        L.DEBUG(f"Saved markdown to {md_path}")
+        return True
+    except Exception as e:
+        L.ERR(f"Failed to save email: {e}")
+        return False
+
+def get_matching_autoresponders(this_email: IncomingEmail, account: EmailAccount) -> List[AutoResponder]:
+    L.DEBUG(f"Called get_matching_autoresponders for email \"{this_email.subject},\" account name \"{account.name}\"")
+    def matches_list(item: str, this_email: IncomingEmail) -> bool:
+        if '@' in item:
+            return item in this_email.sender
+        else:
+            return item.lower() in this_email.subject.lower() or item.lower() in this_email.body.lower()
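+    # A profile matches when its whitelist is empty or any whitelist item hits,
+    # and no blacklist item hits. Items containing '@' match the sender;
+    # everything else is substring-matched against subject and body.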
+    matching_profiles = []
+    for profile in account.autoresponders:
+        whitelist_match = not profile.whitelist or any(matches_list(item, this_email) for item in profile.whitelist)
+        blacklist_match = any(matches_list(item, this_email) for item in profile.blacklist) if profile.blacklist else False
+        if whitelist_match and not blacklist_match:
+            L.DEBUG(f"We have a match for {whitelist_match} and no blacklist matches.")
+            matching_profiles.append(profile)
+        elif whitelist_match and blacklist_match:
+            L.DEBUG(f"Matched whitelist for {whitelist_match}, but also matched blacklist for {blacklist_match}")
+        else:
+            L.DEBUG(f"No whitelist or blacklist matches.")
+    return matching_profiles
+
+
 async def process_account_autoresponding(account: EmailAccount):
-    autoresponded_log = EMAIL_LOGS / account.name / "autoresponded.txt"
-    os.makedirs(autoresponded_log.parent, exist_ok = True)
+    EMAIL_AUTORESPONSE_LOG = EMAIL_LOGS / account.name / "autoresponded.txt"
+    os.makedirs(EMAIL_AUTORESPONSE_LOG.parent, exist_ok=True)
 
     while True:
         try:
-            processed_uids = await load_processed_uids(autoresponded_log)
+            processed_uids = await load_processed_uids(EMAIL_AUTORESPONSE_LOG)
             L.DEBUG(f"{len(processed_uids)} emails marked as already responded to are being ignored.")
+
             with get_imap_connection(account) as inbox:
                 unread_messages = inbox.messages(unread=True)
                 L.DEBUG(f"There are {len(unread_messages)} unread messages.")
+
                 for uid, message in unread_messages:
                     uid_str = uid.decode() if isinstance(uid, bytes) else str(uid)
                     if uid_str not in processed_uids:
-                        recipients = [EmailContact(email=recipient['email'], name=recipient.get('name', '')) for recipient in message.sent_to]
-                        localized_datetime = await locate.localize_datetime(message.date)
-                        this_email = IncomingEmail(
-                            sender=message.sent_from[0]['email'],
-                            datetime_received=localized_datetime,
-                            recipients=recipients,
-                            subject=message.subject,
-                            body=clean_email_content(message.body['html'][0]) if message.body['html'] else clean_email_content(message.body['plain'][0]) or "",
-                            attachments=message.attachments
-                        )
-                        L.DEBUG(f"Attempting autoresponse on {this_email.subject}")
-                        respond_success = await autorespond(this_email, account)
-                        if respond_success:
-                            await save_processed_uid(autoresponded_log, account.name, uid_str)
-                            L.WARN(f"Auto-responded to email: {this_email.subject}")
-                        else:
-                            L.WARN(f"Failed auto-response to {this_email.subject}")
-                    L.DEBUG(f"Skipping {uid_str} because it was already processed.")
+                        await autorespond_single_email(message, uid_str, account, EMAIL_AUTORESPONSE_LOG)
+                    else:
+                        L.DEBUG(f"Skipping {uid_str} because it was already processed.")
+
         except Exception as e:
             L.ERR(f"An error occurred during auto-responding for account {account.name}: {e}")
         
         await asyncio.sleep(account.refresh)
 
-async def process_all_accounts():
+async def autorespond_single_email(message, uid_str: str, account: EmailAccount, log_file: Path):
+    this_email = await create_incoming_email(message)
+    L.DEBUG(f"Evaluating {this_email.subject} for autoresponse-worthiness...")
     
+    matching_profiles = get_matching_autoresponders(this_email, account)
+    L.DEBUG(f"Matching profiles: {matching_profiles}")
+
+    for profile in matching_profiles:
+        response_body = await generate_response(this_email, profile, account)
+        if response_body:
+            subject = f"Re: {this_email.subject}"
+            jpg_path = await sd.workflow(profile.image_prompt, earlyout=False, downscale_to_fit=True) if profile.image_prompt else None
+            success = await send_response(this_email.sender, subject, response_body, profile, account, jpg_path)
+            if success:
+                L.WARN(f"Auto-responded to email: {this_email.subject}")
+                await save_processed_uid(log_file, account.name, uid_str)
+            else:
+                L.WARN(f"Failed to send auto-response to {this_email.subject}")
+        else:
+            L.WARN(f"Unable to generate auto-response for {this_email.subject}")
+
+async def generate_response(this_email: IncomingEmail, profile: AutoResponder, account: EmailAccount) -> Optional[str]:
+    L.INFO(f"Generating auto-response to {this_email.subject} with profile: {profile.name}")
+
+    now = await loc.dt(dt_datetime.now())
+    then = await loc.dt(this_email.datetime_received)
+    age = now - then
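+    # age is a timedelta; it renders like "1 day, 2:03:04" in the prompt below.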
+    usr_prompt = f'''
+Generate a personalized auto-response to the following email:
+From: {this_email.sender}
+Sent: {age} ago
+Subject: "{this_email.subject}"
+Body: {this_email.body}
+---
+Respond on behalf of {account.fullname}, who is unable to respond personally because {profile.context}. Keep the response {profile.style} and to the point, but responsive to the sender's inquiry. Do not mention or recite this context information in your response.
+    '''
+    sys_prompt = f"You are an AI assistant helping {account.fullname} with email responses. {account.fullname} is described as: {account.bio}"
+    
+    try:
+        response = await llm.query_ollama(usr_prompt, sys_prompt, profile.ollama_model, 400)
+        L.DEBUG(f"query_ollama response: {response}")
+        
+        if isinstance(response, dict) and "message" in response and "content" in response["message"]:
+            response = response["message"]["content"]
+        
+        return response + "\n\n"
+        
+    except Exception as e:
+        L.ERR(f"Error generating auto-response: {str(e)}")
+        return None
+
+async def send_response(to_email: str, subject: str, body: str, profile: AutoResponder, account: EmailAccount, image_attachment: Path = None) -> bool:
+    try:
+        message = MIMEMultipart()
+        message['From'] = profile.smtp.username
+        message['To'] = to_email
+        message['Subject'] = subject
+        message.attach(MIMEText(body, 'plain'))
+
+        if image_attachment and os.path.exists(image_attachment):
+            with open(image_attachment, 'rb') as img_file:
+                img = MIMEImage(img_file.read(), name=os.path.basename(image_attachment))
+                message.attach(img)
+
+        L.DEBUG(f"Sending auto-response to {to_email} concerning {subject} from account {profile.name}...")
+
+        with get_smtp_connection(profile) as server:
+            server.login(profile.smtp.username, profile.smtp.password)
+            server.send_message(message)
+
+        L.INFO(f"Auto-response sent to {to_email} concerning {subject} from account {profile.name}!")
+        return True
+
+    except Exception as e:
+        L.ERR(f"Error in preparing/sending auto-response from account {profile.name}: {e}")
+        return False
+
+async def create_incoming_email(message) -> IncomingEmail:
+    recipients = [EmailContact(email=recipient['email'], name=recipient.get('name', '')) for recipient in message.sent_to]
+    localized_datetime = await loc.dt(message.date)
+    return IncomingEmail(
+        sender=message.sent_from[0]['email'],
+        datetime_received=localized_datetime,
+        recipients=recipients,
+        subject=message.subject,
+        body=clean_email_content(message.body['html'][0]) if message.body['html'] else clean_email_content(message.body['plain'][0]) or "",
+        attachments=message.attachments
+    )
+
+async def load_processed_uids(filename: Path) -> Set[str]:
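+    # Each log line is "account_name:uid"; keep only the uid portion.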
+    if filename.exists():
+        async with aiofiles.open(filename, 'r') as f:
+            return set(line.strip().split(':')[-1] for line in await f.readlines())
+    return set()
+
+async def save_processed_uid(filename: Path, account_name: str, uid: str):
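+    # Append-only log; format matches what load_processed_uids parses.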
+    async with aiofiles.open(filename, 'a') as f:
+        await f.write(f"{account_name}:{uid}\n")
+
+
+async def process_all_accounts():
     email_accounts = load_email_accounts(EMAIL_CONFIG)
-    summarization_tasks = [asyncio.create_task(process_account_summarization(account)) for account in email_accounts]
+    summarization_tasks = [asyncio.create_task(process_account_archival(account)) for account in email_accounts]
     autoresponding_tasks = [asyncio.create_task(process_account_autoresponding(account)) for account in email_accounts]
     await asyncio.gather(*summarization_tasks, *autoresponding_tasks)
 
+
 @email.on_event("startup")
 async def startup_event():
     await asyncio.sleep(5)
-    asyncio.create_task(process_all_accounts())
\ No newline at end of file
+    asyncio.create_task(process_all_accounts())
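
For orientation, both loops above follow the same per-account polling shape. A minimal sketch (load_processed_uids, get_imap_connection, and the EmailAccount fields come from this diff; everything else is illustrative):

```python
# Sketch, not the actual implementation: poll one account's inbox forever,
# handing each unseen UID to a callback, then sleeping account.refresh seconds.
import asyncio

async def poll_account(account, handle_message, log_path):
    while True:
        try:
            processed = await load_processed_uids(log_path)
            with get_imap_connection(account) as inbox:
                for uid, message in inbox.messages(unread=True):
                    uid_str = uid.decode() if isinstance(uid, bytes) else str(uid)
                    if uid_str not in processed:
                        await handle_message(message, uid_str)
        except Exception as e:
            print(f"Polling error for {account.name}: {e}")  # L.ERR in sijapi
        await asyncio.sleep(account.refresh)
```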
diff --git a/sijapi/routers/llm.py b/sijapi/routers/llm.py
index 0db62f8..d38c6e2 100644
--- a/sijapi/routers/llm.py
+++ b/sijapi/routers/llm.py
@@ -80,14 +80,6 @@ async def generate_response(prompt: str):
     )
     return {"response": output['response']}
 
-@llm.post("/llm/query")
-async def llm_query_endpoint(
-    message: str = Form(...),
-    file: Optional(UploadFile) = Form(...)
-):
-    return None
-
-
 
 async def query_ollama(usr: str, sys: str = LLM_SYS_MSG, model: str = DEFAULT_LLM, max_tokens: int = 200):
     messages = [{"role": "system", "content": sys},
diff --git a/sijapi/routers/note.py b/sijapi/routers/note.py
index a3d99eb..8aadcec 100644
--- a/sijapi/routers/note.py
+++ b/sijapi/routers/note.py
@@ -18,20 +18,21 @@ from markdownify import markdownify as md
 from typing import Optional, Union, Dict, List, Tuple
 from urllib.parse import urlparse
 from urllib3.util.retry import Retry
+import newspaper
 from newspaper import Article
 import trafilatura
 from readability import Document
 from requests.adapters import HTTPAdapter
 import re
 import os
-from datetime import timedelta, datetime, time as dt_time, date as dt_date
+from datetime import timedelta, datetime as dt_datetime, time as dt_time, date as dt_date
 from dateutil.parser import parse as dateutil_parse
 from fastapi import HTTPException, status
 from pathlib import Path
 from fastapi import APIRouter, Query, HTTPException
-from sijapi import L, OBSIDIAN_VAULT_DIR, OBSIDIAN_RESOURCES_DIR, ARCHIVE_DIR, BASE_URL, OBSIDIAN_BANNER_SCENE, DEFAULT_11L_VOICE, DEFAULT_VOICE, TZ, DynamicTZ
-from sijapi.routers import tts, llm, time, sd, locate, weather, asr, calendar
-from sijapi.routers.locate import Location
+from sijapi import L, OBSIDIAN_VAULT_DIR, OBSIDIAN_RESOURCES_DIR, ARCHIVE_DIR, BASE_URL, OBSIDIAN_BANNER_SCENE, DEFAULT_11L_VOICE, DEFAULT_VOICE, TZ, DynamicTZ, GEO
+from sijapi.routers import cal, loc, tts, llm, time, sd, weather, asr
+from sijapi.routers.loc import Location
 from sijapi.utilities import assemble_journal_path, assemble_archive_path, convert_to_12_hour_format, sanitize_filename, convert_degrees_to_cardinal, HOURLY_COLUMNS_MAPPING
 
 
@@ -39,16 +40,17 @@ note = APIRouter()
 
 
 
+### Daily Note Builder ###
 
 @note.get("/note/bulk/{dt_start}/{dt_end}")
 async def build_daily_note_range_endpoint(dt_start: str, dt_end: str):
-    start_date = datetime.strptime(dt_start, "%Y-%m-%d")
-    end_date = datetime.strptime(dt_end, "%Y-%m-%d")
+    start_date = dt_datetime.strptime(dt_start, "%Y-%m-%d")
+    end_date = dt_datetime.strptime(dt_end, "%Y-%m-%d")
     
     results = []
     current_date = start_date
     while current_date <= end_date:
-        formatted_date = await locate.localize_datetime(current_date)
+        formatted_date = await loc.dt(current_date)
         result = await build_daily_note(formatted_date)
         results.append(result)
         current_date += timedelta(days=1)
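
A hypothetical client call for the bulk builder (dates are inclusive; BASE_URL is the configured sijapi base):

```python
import requests  # illustrative only; any HTTP client works

resp = requests.get(f"{BASE_URL}/note/bulk/2024-06-01/2024-06-07")
print(resp.json())
```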
@@ -59,13 +61,13 @@ async def build_daily_note_range_endpoint(dt_start: str, dt_end: str):
 
 @note.post("/note/create")
 async def build_daily_note_endpoint(
-    date_str: Optional[str] = Form(datetime.now().strftime("%Y-%m-%d")),
+    date_str: Optional[str] = Form(dt_datetime.now().strftime("%Y-%m-%d")),
     location: Optional[str] = Form(None)
 ):
     lat, lon = None, None
     try:
         if not date_str:
-            date_str = datetime.now().strftime("%Y-%m-%d")
+            date_str = dt_datetime.now().strftime("%Y-%m-%d")
         if location:
             lat, lon = map(float, location.split(','))
             tz = ZoneInfo(DynamicTZ.find(lat, lon))
@@ -75,31 +77,35 @@ async def build_daily_note_endpoint(
     except (ValueError, AttributeError, TypeError) as e:
         L.WARN(f"Falling back to localized datetime due to error: {e}")
         try:
-            date_time = locate.localize_datetime(date_str)
-            places = await locate.fetch_locations(date_time)
+            date_time = await loc.dt(date_str)
+            places = await loc.fetch_locations(date_time)
             lat, lon = places[0].latitude, places[0].longitude
         except Exception as e:
             return JSONResponse(content={"error": str(e)}, status_code=400)
 
     path = await build_daily_note(date_time, lat, lon)
-    return JSONResponse(content={"path": path}, status_code=200)
 
-async def build_daily_note(date_time: datetime, lat: float = None, lon: float = None):
+    path_str = str(path)  # Convert PosixPath to string
+    
+    return JSONResponse(content={"path": path_str}, status_code=200)
+
+
+async def build_daily_note(date_time: dt_datetime, lat: float = None, lon: float = None):
     '''
 Obsidian helper. Takes a datetime and creates a new daily note. Note: it uses the sijapi configuration file to place the daily note and does NOT presently interface with Obsidian's daily note or periodic notes extensions. It is your responsibility to ensure they match.
     '''
     absolute_path, _ = assemble_journal_path(date_time)
-
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in build_daily_note.")
     formatted_day = date_time.strftime("%A %B %d, %Y")  # Monday May 27, 2024 formatting
     day_before = (date_time - timedelta(days=1)).strftime("%Y-%m-%d %A")  # 2024-05-26 Sunday formatting
     day_after = (date_time + timedelta(days=1)).strftime("%Y-%m-%d %A")  # 2024-05-28 Tuesday formatting
     header = f"# [[{day_before}|← ]] {formatted_day} [[{day_after}| →]]\n\n"
     
     if not lat or not lon:
-        places = await locate.fetch_locations(date_time)
+        places = await loc.fetch_locations(date_time)
         lat, lon = places[0].latitude, places[0].longitude
 
-    location = await locate.reverse_geocode(lat, lon)
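+    # GEO.code is the Geocoder replacement for locate.reverse_geocode.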
+    location = await GEO.code(lat, lon)
     
     timeslips = await build_daily_timeslips(date_time)
 
@@ -126,7 +132,7 @@ date: "{fm_day}"
 banner: "![[{banner_path}]]"
 tags:
  - daily-note
-created: "{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}"
+created: "{dt_datetime.now().strftime("%Y-%m-%d %H:%M:%S")}"
 ---
     
 {header}
@@ -153,6 +159,8 @@ created: "{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}"
     return absolute_path
     
 
+### Daily Note Component Builders ###
+
 async def build_daily_timeslips(date):
     '''
 
@@ -166,469 +174,13 @@ async def build_daily_timeslips(date):
     return f"![[{relative_path}]]"
 
 
-### CLIPPER ###
-@note.post("/clip")
-async def clip_post(
-    bg_tasks: BackgroundTasks,
-    url: Optional[str] = Form(None),
-    source: Optional[str] = Form(None),
-    title: Optional[str] = Form(None),
-    tts: str = Form('summary'),
-    voice: str = Form(DEFAULT_VOICE),
-    encoding: str = Form('utf-8')
-):
-    markdown_filename = await process_article(bg_tasks, url, title, encoding, source, tts, voice)
-    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
-
-@note.post("/archive")
-async def archive_post(
-    url: Optional[str] = Form(None),
-    source: Optional[str] = Form(None),
-    title: Optional[str] = Form(None),
-    encoding: str = Form('utf-8')
-):
-    markdown_filename = await process_archive(url, title, encoding, source)
-    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
-
-@note.get("/clip")
-async def clip_get(
-    bg_tasks: BackgroundTasks,
-    url: str,
-    title: Optional[str] = Query(None),
-    encoding: str = Query('utf-8'),
-    tts: str = Query('summary'),
-    voice: str = Query(DEFAULT_VOICE)
-):
-    markdown_filename = await process_article(bg_tasks, url, title, encoding, tts=tts, voice=voice)
-    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
-
-@note.post("/note/add")
-async def note_add_endpoint(file: Optional[UploadFile] = File(None), text: Optional[str] = Form(None), source: Optional[str] = Form(None), bg_tasks: BackgroundTasks = None):
-    L.DEBUG(f"Received request on /note/add...")
-    if not file and not text:
-        L.WARN(f"... without any file or text!")
-        raise HTTPException(status_code=400, detail="Either text or a file must be provided")
-    else:
-        result = await process_for_daily_note(file, text, source, bg_tasks)
-        L.INFO(f"Result on /note/add: {result}")
-        return JSONResponse(result, status_code=204)
-
-async def process_for_daily_note(file: Optional[UploadFile] = File(None), text: Optional[str] = None, source: Optional[str] = None, bg_tasks: BackgroundTasks = None):
-    now = datetime.now()
-    transcription_entry = ""
-    file_entry = ""
-    if file:
-        L.DEBUG("File received...")
-        file_content = await file.read()
-        audio_io = BytesIO(file_content)
-        
-        # Improve error handling for file type guessing
-        guessed_type = mimetypes.guess_type(file.filename)
-        file_type = guessed_type[0] if guessed_type[0] else "application/octet-stream"
-        
-        L.DEBUG(f"Processing as {file_type}...")
-        
-        # Extract the main type (e.g., 'audio', 'image', 'video')
-        main_type = file_type.split('/')[0]
-        subdir = main_type.title() if main_type else "Documents"
-        
-        absolute_path, relative_path = assemble_journal_path(now, subdir=subdir, filename=file.filename)
-        L.DEBUG(f"Destination path: {absolute_path}")
-        
-        with open(absolute_path, 'wb') as f:
-            f.write(file_content)
-        L.DEBUG(f"Processing {f.name}...")
-        
-        if main_type == 'audio':
-            transcription = await asr.transcribe_audio(file_path=absolute_path, params=asr.TranscribeParams(model="small-en", language="en", threads=6))
-            file_entry = f"![[{relative_path}]]"
-        elif main_type == 'image':
-            file_entry = f"![[{relative_path}]]"
-        else:
-            file_entry = f"[Source]({relative_path})"
-    
-    text_entry = text if text else ""
-    L.DEBUG(f"transcription: {transcription_entry}\nfile_entry: {file_entry}\ntext_entry: {text_entry}")
-    return await add_to_daily_note(transcription_entry, file_entry, text_entry, now)
-
-async def add_to_daily_note(transcription: str = None, file_link: str = None, additional_text: str = None, date_time: datetime = None):
-    date_time = date_time or datetime.now()
-    note_path, _ = assemble_journal_path(date_time, filename='Notes', extension=".md", no_timestamp = True)
-    time_str = date_time.strftime("%H:%M")
-    
-    entry_lines = []
-    if additional_text and additional_text.strip():
-        entry_lines.append(f"\t* {additional_text.strip()}") 
-    if transcription and transcription.strip():
-        entry_lines.append(f"\t* {transcription.strip()}") 
-    if file_link and file_link.strip():
-        entry_lines.append(f"\t\t {file_link.strip()}")
-
-    entry = f"\n* **{time_str}**\n" + "\n".join(entry_lines)
-
-    # Write the entry to the end of the file
-    if note_path.exists():
-        with open(note_path, 'a', encoding='utf-8') as note_file:
-            note_file.write(entry)
-    else: 
-        date_str = date_time.strftime("%Y-%m-%d")
-        frontmatter = f"""---
-date: {date_str}
-tags:
- - notes
----
-
-"""
-        content = frontmatter + entry
-        # If the file doesn't exist, create it and start with "Notes"
-        with open(note_path, 'w', encoding='utf-8') as note_file:
-            note_file.write(content)
-
-    return entry
-
-async def handle_text(title:str, summary:str, extracted_text:str, date_time: datetime = None):
-    date_time = date_time if date_time else datetime.now()
-    absolute_path, relative_path = assemble_journal_path(date_time, filename=title, extension=".md", no_timestamp = True)
-    with open(absolute_path, "w") as file:
-        file.write(f"# {title}\n\n## Summary\n{summary}\n\n## Transcript\n{extracted_text}")
-        
-    # add_to_daily_note(f"**Uploaded [[{title}]]**: *{summary}*", absolute_path)
-
-    return True
-
-
-async def process_document(
-    bg_tasks: BackgroundTasks,
-    document: File,
-    title: Optional[str] = None,
-    tts_mode: str = "summary",
-    voice: str = DEFAULT_VOICE
-):
-    timestamp = datetime.now().strftime('%b %d, %Y at %H:%M')
-
-    # Save the document to OBSIDIAN_RESOURCES_DIR
-    document_content = await document.read()
-    file_path = Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR / document.filename
-    with open(file_path, 'wb') as f:
-        f.write(document_content)
-
-    parsed_content = await llm.extract_text(file_path)  # Ensure extract_text is awaited
-
-    llm_title, summary = await llm.title_and_summary(parsed_content)
-    try:
-        readable_title = sanitize_filename(title if title else document.filename)
-
-        if tts_mode == "full" or tts_mode == "content" or tts_mode == "body":
-            tts_text = parsed_content
-        elif tts_mode == "summary" or tts_mode == "excerpt":
-            tts_text = summary
-        else:
-            tts_text = None
-
-        frontmatter = f"""---
-title: {readable_title}
-added: {timestamp}
----
-"""
-        body = f"# {readable_title}\n\n"
-
-        if tts_text:
-            try:
-                datetime_str = datetime.now().strftime("%Y%m%d%H%M%S")
-                audio_filename = f"{datetime_str} {readable_title}"
-                audio_path = await tts.generate_speech(
-                    bg_tasks=bg_tasks,
-                    text=tts_text,
-                    voice=voice,
-                    model="eleven_turbo_v2",
-                    podcast=True,
-                    title=audio_filename,
-                    output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR
-                )
-                audio_ext = Path(audio_path).suffix
-                obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
-                body += f"{obsidian_link}\n\n"
-            except Exception as e:
-                L.ERR(f"Failed in the TTS portion of clipping: {e}")
-
-        body += f"> [!summary]+\n"
-        body += f"> {summary}\n\n"
-        body += parsed_content
-        markdown_content = frontmatter + body
-
-        markdown_filename = f"{readable_title}.md"
-        encoding = 'utf-8'
-
-        with open(markdown_filename, 'w', encoding=encoding) as md_file:
-            md_file.write(markdown_content)
-
-        L.INFO(f"Successfully saved to {markdown_filename}")
-
-        return markdown_filename
-
-    except Exception as e:
-        L.ERR(f"Failed to clip: {str(e)}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-async def process_article(
-    bg_tasks: BackgroundTasks,
-    url: str,
-    title: Optional[str] = None,
-    encoding: str = 'utf-8',
-    source: Optional[str] = None,
-    tts_mode: str = "summary", 
-    voice: str = DEFAULT_11L_VOICE
-):
-
-    timestamp = datetime.now().strftime('%b %d, %Y at %H:%M')
-
-    parsed_content = await parse_article(url, source)
-    if parsed_content is None:
-        return {"error": "Failed to retrieve content"}
-
-    readable_title = sanitize_filename(title or parsed_content.get("title") or timestamp)
-    markdown_filename, relative_path = assemble_journal_path(datetime.now(), subdir="Articles", filename=readable_title, extension=".md")
-
-    try:
-        summary = await llm.summarize_text(parsed_content["content"], "Summarize the provided text. Respond with the summary and nothing else. Do not otherwise acknowledge the request. Just provide the requested summary.")
-        summary = summary.replace('\n', ' ')  # Remove line breaks
-
-        if tts_mode == "full" or tts_mode == "content":
-            tts_text = parsed_content["content"]
-        elif tts_mode == "summary" or tts_mode == "excerpt":
-            tts_text = summary
-        else:
-            tts_text = None
-
-        banner_markdown = ''
-        try:
-            banner_url = parsed_content.get('image', '')
-            if banner_url != '':
-                banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
-                if banner_image:
-                    banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
-                
-        except Exception as e:
-            L.ERR(f"No image found in article")
-
-        authors = ', '.join('[[{}]]'.format(author) for author in parsed_content.get('authors', ['Unknown']))
-
-        frontmatter = f"""---
-title: {readable_title}
-authors: {', '.join('[[{}]]'.format(author) for author in parsed_content.get('authors', ['Unknown']))}
-published: {parsed_content.get('date_published', 'Unknown')}
-added: {timestamp}
-excerpt: {parsed_content.get('excerpt', '')}
-banner: "{banner_markdown}"
-tags:
-
-"""
-        frontmatter += '\n'.join(f" - {tag}" for tag in parsed_content.get('tags', []))
-        frontmatter += '\n---\n'
-
-        body = f"# {readable_title}\n\n"
-
-        if tts_text:
-            datetime_str = datetime.now().strftime("%Y%m%d%H%M%S")
-            audio_filename = f"{datetime_str} {readable_title}"
-            try:
-                audio_path = await tts.generate_speech(bg_tasks=bg_tasks, text=tts_text, voice=voice, model="eleven_turbo_v2", podcast=True, title=audio_filename,
-                output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
-                audio_ext = Path(audio_path).suffix
-                obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
-                body += f"{obsidian_link}\n\n"
-            except Exception as e:
-                L.ERR(f"Failed to generate TTS for np3k. {e}")
-
-        try:
-            body += f"by {authors} in [{parsed_content.get('domain', urlparse(url).netloc.replace('www.', ''))}]({url}).\n\n"
-            body += f"> [!summary]+\n"
-            body += f"> {summary}\n\n"
-            body += parsed_content["content"]
-            markdown_content = frontmatter + body
-
-        except Exception as e:
-            L.ERR(f"Failed to combine elements of article markdown.")
-
-        try:
-            with open(markdown_filename, 'w', encoding=encoding) as md_file:
-                md_file.write(markdown_content)
-
-            L.INFO(f"Successfully saved to {markdown_filename}")
-            add_to_daily_note
-            return markdown_filename
-        
-        except Exception as e:
-            L.ERR(f"Failed to write markdown file")
-            raise HTTPException(status_code=500, detail=str(e))
-        
-    except Exception as e:
-        L.ERR(f"Failed to clip {url}: {str(e)}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-async def parse_article(url: str, source: Optional[str] = None):
-    source = source if source else trafilatura.fetch_url(url)
-    traf = trafilatura.extract_metadata(filecontent=source, default_url=url)
-
-    # Pass the HTML content to newspaper3k:
-    np3k = Article(url)
-    np3k.set_html(source)
-    np3k.parse()
-
-    L.INFO(f"Parsed {np3k.title}")
-    
-
-    title = (np3k.title or traf.title) or url
-    authors = np3k.authors or traf.author
-    authors = (authors if isinstance(authors, List) else [authors])
-    date = np3k.publish_date or traf.date
-    try:
-        date = await locate.localize_datetime(date)
-    except:
-        L.DEBUG(f"Failed to localize {date}")
-        date = await locate.localize_datetime(datetime.now())
-    excerpt = np3k.meta_description or traf.description
-    content = trafilatura.extract(source, output_format="markdown", include_comments=False) or np3k.text
-    image = np3k.top_image or traf.image
-    domain = traf.sitename or urlparse(url).netloc.replace('www.', '').title()
-    tags = np3k.meta_keywords or traf.categories or traf.tags
-    tags = tags if isinstance(tags, List) else [tags]
-    
-    return {
-        'title': title.replace("  ", " "),
-        'authors': authors,
-        'date': date.strftime("%b %d, %Y at %H:%M"),
-        'excerpt': excerpt,
-        'content': content,
-        'image': image,
-        'url': url,
-        'domain': domain,
-        'tags': np3k.meta_keywords
-    }
-
-
-async def html_to_markdown(url: str = None, source: str = None) -> Optional[str]:
-    if source:
-        html_content = source
-    elif url:
-        async with aiohttp.ClientSession() as session:
-            async with session.get(url) as response:
-                html_content = await response.text()
-    else:
-        L.ERR(f"Unable to convert nothing to markdown.")
-        return None
-
-    # Use readability to extract the main content
-    doc = Document(html_content)
-    cleaned_html = doc.summary()
-
-    # Parse the cleaned HTML with BeautifulSoup for any additional processing
-    soup = BeautifulSoup(cleaned_html, 'html.parser')
-
-    # Remove any remaining unwanted elements
-    for element in soup(['script', 'style']):
-        element.decompose()
-
-    # Convert to markdown
-    markdown_content = md(str(soup), heading_style="ATX")
-
-    return markdown_content
-
-
-async def process_archive(
-    url: str,
-    title: Optional[str] = None,
-    encoding: str = 'utf-8',
-    source: Optional[str] = None,
-) -> Path:
-    timestamp = datetime.now().strftime('%b %d, %Y at %H:%M')
-    readable_title = title if title else f"{url} - {timestamp}"
-    
-    content = await html_to_markdown(url, source)
-    if content is None:
-        raise HTTPException(status_code=400, detail="Failed to convert content to markdown")
-    
-    markdown_path, relative_path = assemble_archive_path(readable_title, ".md")
-    
-    markdown_content = f"---\n"
-    markdown_content += f"title: {readable_title}\n"
-    markdown_content += f"added: {timestamp}\n"
-    markdown_content += f"url: {url}"
-    markdown_content += f"date: {datetime.now().strftime('%Y-%m-%d')}"
-    markdown_content += f"---\n\n"
-    markdown_content += f"# {readable_title}\n\n"
-    markdown_content += f"Clipped from [{url}]({url}) on {timestamp}"
-    markdown_content += content
-    
-    try:
-        markdown_path.parent.mkdir(parents=True, exist_ok=True)
-        with open(markdown_path, 'w', encoding=encoding) as md_file:
-            md_file.write(markdown_content)
-        L.DEBUG(f"Successfully saved to {markdown_path}")
-        return markdown_path
-    except Exception as e:
-        L.WARN(f"Failed to write markdown file: {str(e)}")
-        return None
-
-def download_file(url, folder):
-    os.makedirs(folder, exist_ok=True)
-    filename = str(uuid.uuid4()) + os.path.splitext(urlparse(url).path)[-1]
-    filepath = os.path.join(folder, filename)
-    
-    session = requests.Session()
-    retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
-    session.mount('http://', HTTPAdapter(max_retries=retries))
-    session.mount('https://', HTTPAdapter(max_retries=retries))
-
-    headers = {
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
-    }
-
-    try:
-        response = session.get(url, headers=headers, timeout=10)
-        if response.status_code == 200:
-            if 'image' in response.headers.get('Content-Type', ''):
-                with open(filepath, 'wb') as f:
-                    f.write(response.content)
-            else:
-                L.ERR(f"Failed to download image: {url}, invalid content type: {response.headers.get('Content-Type')}")
-                return None
-        else:
-            L.ERR(f"Failed to download image: {url}, status code: {response.status_code}")
-            return None
-    except Exception as e:
-        L.ERR(f"Failed to download image: {url}, error: {str(e)}")
-        return None
-    return filename
-
-def copy_file(local_path, folder):
-    os.makedirs(folder, exist_ok=True)
-    filename = os.path.basename(local_path)
-    destination_path = os.path.join(folder, filename)
-    shutil.copy(local_path, destination_path)
-    return filename
-
-
-async def save_file(file: UploadFile, folder: Path) -> Path:
-    file_path = folder / f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file.filename}"
-    with open(file_path, 'wb') as f:
-        shutil.copyfileobj(file.file, f)
-    return file_path
-
-
-
-    
-### FRONTMATTER, BANNER
-
 @note.put("/note/update_frontmatter")
 async def update_frontmatter_endpoint(date: str, key: str, value: str):
-    date_time = datetime.strptime(date, "%Y-%m-%d")
+    date_time = dt_datetime.strptime(date, "%Y-%m-%d")
     result = await update_frontmatter(date_time, key, value)
     return result
     
-async def update_frontmatter(date_time: datetime, key: str, value: str):
+async def update_frontmatter(date_time: dt_datetime, key: str, value: str):
     # Parse the date and format paths
     file_path, relative_path = assemble_journal_path(date_time)
 
@@ -682,14 +234,18 @@ async def banner_endpoint(dt: str, location: str = None, mood: str = None, other
         Endpoint (POST) that generates a new banner image for the Obsidian daily note for a specified date, taking into account optional additional information, then updates the frontmatter if necessary.
     '''
     L.DEBUG(f"banner_endpoint requested with date: {dt} ({type(dt)})")
-    date_time = await locate.localize_datetime(dt)
+    date_time = await loc.dt(dt)
     L.DEBUG(f"date_time after localization: {date_time} ({type(date_time)})")
     jpg_path = await generate_banner(date_time, location, mood=mood, other_context=other_context)
     return jpg_path
 
 
-async def get_note(date_time: datetime):
-    date_time = await locate.localize_datetime(date_time);
+
+
+async def get_note(date_time: dt_datetime):
+    date_time = await loc.dt(date_time)
     absolute_path, local_path = assemble_journal_path(date_time, filename = "Notes", extension = ".md", no_timestamp = True)
 
     if absolute_path.is_file():
@@ -697,7 +253,7 @@ async def get_note(date_time: datetime):
             content = file.read()
         return content if content else None
 
-async def sentiment_analysis(date_time: datetime):
+async def sentiment_analysis(date_time: dt_datetime):
     most_recent_note = await get_note(date_time)
     most_recent_note = most_recent_note or await get_note(date_time - timedelta(days=1))
     if most_recent_note:
@@ -711,20 +267,20 @@ async def sentiment_analysis(date_time: datetime):
 
 async def generate_banner(dt, location: Location = None, forecast: str = None, mood: str = None, other_context: str = None):
     # L.DEBUG(f"Location: {location}, forecast: {forecast}, mood: {mood}, other_context: {other_context}")
-    date_time = await locate.localize_datetime(dt)
+    date_time = await loc.dt(dt)
     L.DEBUG(f"generate_banner called with date_time: {date_time}")
     destination_path, local_path = assemble_journal_path(date_time, filename="Banner", extension=".jpg", no_timestamp = True)
     L.DEBUG(f"destination path generated: {destination_path}")
     
     if not location:
-        locations = await locate.fetch_locations(date_time)
+        locations = await loc.fetch_locations(date_time)
         if locations:
             location = locations[0]
     
     display_name = "Location: "
     if location:
         lat, lon = location.latitude, location.longitude
-        override_location = await locate.find_override_locations(lat, lon)
+        override_location = await loc.find_override_locations(lat, lon)
         display_name += f"{override_location}, " if override_location else ""
         if location.display_name:
             display_name += f"{location.display_name}"
@@ -739,7 +295,7 @@ async def generate_banner(dt, location: Location = None, forecast: str = None, m
             display_name += f"{location.country} " if location.country else ""
 
         if display_name == "Location: ":
-            geocoded_location = await locate.reverse_geocode(lat, lon)
+            geocoded_location = await GEO.code(lat, lon)
             if geocoded_location.display_name or geocoded_location.city or geocoded_location.country:
                 return await generate_banner(dt, geocoded_location, forecast, mood, other_context)
             else:
@@ -757,7 +313,7 @@ async def generate_banner(dt, location: Location = None, forecast: str = None, m
     elif sentiment and not mood: mood = f"Mood: {sentiment}"
     else: mood = ""
 
-    events = await calendar.get_events(date_time, date_time)
+    events = await cal.get_events(date_time, date_time)
     formatted_events = []
     for event in events:
         event_str = event.get('name')
@@ -792,7 +348,8 @@ async def note_weather_get(
 ):
 
     try:
-        date_time = datetime.now() if date == "0" else await locate.localize_datetime(date)
+        date_time = dt_datetime.now() if date == "0" else await loc.dt(date)
+        L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our dt_datetime in note_weather_get.")
         L.DEBUG(f"date: {date} .. date_time: {date_time}")
         content = await update_dn_weather(date_time) #, lat, lon)
         return JSONResponse(content={"forecast": content}, status_code=200)
@@ -807,26 +364,27 @@ async def note_weather_get(
 
 @note.post("/update/note/{date}")
 async def post_update_daily_weather_and_calendar_and_timeslips(date: str) -> PlainTextResponse:
-    date_time = await locate.localize_datetime(date)
+    date_time = await loc.dt(date)
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our dt_datetime in post_update_daily_weather_and_calendar_and_timeslips.")
     await update_dn_weather(date_time)
     await update_daily_note_events(date_time)
     await build_daily_timeslips(date_time)
     return f"[Refresh]({BASE_URL}/update/note/{date_time.strftime('%Y-%m-%d')}"
 
-async def update_dn_weather(date_time: datetime, lat: float = None, lon: float = None):
+async def update_dn_weather(date_time: dt_datetime, lat: float = None, lon: float = None):
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in update_dn_weather.")
     try:
         if lat and lon:
-            place = locate.reverse_geocode(lat, lon)
+            place = await GEO.code(lat, lon)
 
         else:
             L.DEBUG(f"Updating weather for {date_time}")
-
-            places = await locate.fetch_locations(date_time)
+            places = await loc.fetch_locations(date_time)
             place = places[0]
             lat = place.latitude
             lon = place.longitude
         
-        city = await locate.find_override_locations(lat, lon)
+        city = await loc.find_override_locations(lat, lon)
         if city:
             L.INFO(f"Using override location: {city}")
 
@@ -836,7 +394,7 @@ async def update_dn_weather(date_time: datetime, lat: float = None, lon: float =
                 L.INFO(f"City in data: {city}")
 
             else:
-                loc = await locate.reverse_geocode(lat, lon)
-                L.DEBUG(f"loc: {loc}")
-                city = loc.name
-                city = city if city else loc.city
+                # Use a distinct name so the loc router module isn't shadowed,
+                # which would break the loc.fetch_locations call above.
+                geo = await GEO.code(lat, lon)
+                L.DEBUG(f"geo: {geo}")
+                city = geo.name
+                city = city if city else geo.city
@@ -849,7 +407,7 @@ async def update_dn_weather(date_time: datetime, lat: float = None, lon: float =
         L.DEBUG(f"Journal path: absolute_path={absolute_path}, relative_path={relative_path}")
 
         try:
-            L.DEBUG(f"passing date_time {date_time}, {lat}/{lon} into fetch_and_store")
+            L.DEBUG(f"passing date_time {date_time.strftime('%Y-%m-%d %H:%M:%S')}, {lat}/{lon} into fetch_and_store")
             day = await weather.get_weather(date_time, lat, lon)
             L.DEBUG(f"day information obtained from get_weather: {day}")
             if day:
@@ -887,13 +445,13 @@ async def update_dn_weather(date_time: datetime, lat: float = None, lon: float =
                     
 
                     date_str = date_time.strftime("%Y-%m-%d")
-                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                    now = dt_datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
                     detailed_forecast = (
                         f"---\n"
                         f"date: {date_str}\n"
-                        f"latitude: {lat}"
-                        f"longitude: {lon}"
+                        f"latitude: {lat}\n"
+                        f"longitude: {lon}\n"
                         f"tags:\n"
                         f" - weather\n"
                         f"updated: {now}\n"
@@ -951,7 +509,6 @@ async def update_dn_weather(date_time: datetime, lat: float = None, lon: float =
             L.ERR(traceback.format_exc())
             raise HTTPException(status_code=999, detail=f"Error: {e}")
 
-
     except ValueError as ve:
         L.ERR(f"Value error in update_dn_weather: {str(ve)}")
         L.ERR(traceback.format_exc())
@@ -970,7 +527,6 @@ def format_hourly_time(hour):
         L.ERR(f"Error in format_hourly_time: {str(e)}")
         L.ERR(traceback.format_exc())
         return ""
-
     
 def format_hourly_icon(hour, sunrise, sunset):
     try:
@@ -1121,10 +677,6 @@ def get_weather_emoji(weather_condition):
         return "⛅"
     else:
         return "🌡️"  # Default emoji for unclassified weather
-    
-
-### CALENDAR ###
-
 
 async def format_events_as_markdown(event_data: Dict[str, Union[str, List[Dict[str, str]]]]) -> str:
     def remove_characters(s: str) -> str:
@@ -1134,10 +686,10 @@ async def format_events_as_markdown(event_data: Dict[str, Union[str, List[Dict[s
         return s
     
     date_str = event_data["date"]
-    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    now = dt_datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     events_markdown = []
 
-    event_data["events"] = sorted(event_data["events"], key=lambda event: (not event['all_day'], datetime.strptime(event['start'], "%H:%M")), reverse=False)
+    event_data["events"] = sorted(event_data["events"], key=lambda event: (not event['all_day'], dt_datetime.strptime(event['start'], "%H:%M")), reverse=False)
 
     total_events = len(event_data["events"])
     event_markdown = f"```ad-events"
@@ -1156,7 +708,7 @@ async def format_events_as_markdown(event_data: Dict[str, Union[str, List[Dict[s
             event_name = event['name'][:80]
             markdown_name = f"[{event_name}]({url})"
 
-            if (event['all_day']) or (event['start'] == event['end'] == "00:00") or (datetime.combine(dt_date.min, datetime.strptime(event['end'], "%H:%M").time()) - datetime.combine(dt_date.min, datetime.strptime(event['start'], "%H:%M").time()) >= timedelta(hours=23, minutes=59)):
+            if (event['all_day']) or (event['start'] == event['end'] == "00:00") or (dt_datetime.combine(dt_date.min, dt_datetime.strptime(event['end'], "%H:%M").time()) - dt_datetime.combine(dt_date.min, dt_datetime.strptime(event['start'], "%H:%M").time()) >= timedelta(hours=23, minutes=59)):
                 event_markdown += f"\n - [ ] **{markdown_name}** (All day)"
 
             else:
@@ -1215,14 +767,14 @@ async def format_events_as_markdown(event_data: Dict[str, Union[str, List[Dict[s
 @note.get("/note/events", response_class=PlainTextResponse)
 async def note_events_endpoint(date: str = Query(None)):
         
-    date_time = await locate.localize_datetime(date) if date else datetime.now(TZ)
+    date_time = await loc.dt(date) if date else await loc.dt(dt_datetime.now())
     response = await update_daily_note_events(date_time)
     return PlainTextResponse(content=response, status_code=200)
 
-async def update_daily_note_events(date_time: datetime):
+async def update_daily_note_events(date_time: dt_datetime):
     L.DEBUG(f"Looking up events on date: {date_time.strftime('%Y-%m-%d')}")
     try:    
-        events = await calendar.get_events(date_time, date_time)
+        events = await cal.get_events(date_time, date_time)
         L.DEBUG(f"Raw events: {events}")
         event_data = {
             "date": date_time.strftime('%Y-%m-%d'),
@@ -1242,3 +794,537 @@ async def update_daily_note_events(date_time: datetime):
         L.ERR(f"Error processing events: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
+
+### CLIPPER ###
+@note.post("/clip")
+async def clip_post(
+    bg_tasks: BackgroundTasks,
+    url: Optional[str] = Form(None),
+    source: Optional[str] = Form(None),
+    title: Optional[str] = Form(None),
+    tts: str = Form('summary'),
+    voice: str = Form(DEFAULT_VOICE),
+    encoding: str = Form('utf-8')
+):
+    markdown_filename = await process_article(bg_tasks, url, title, encoding, source, tts, voice)
+    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
+
+@note.post("/archive")
+async def archive_post(
+    url: Optional[str] = Form(None),
+    source: Optional[str] = Form(None),
+    title: Optional[str] = Form(None),
+    encoding: str = Form('utf-8')
+):
+    markdown_filename = await process_archive(url, title, encoding, source)
+    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
+
+@note.get("/clip")
+async def clip_get(
+    bg_tasks: BackgroundTasks,
+    url: str,
+    title: Optional[str] = Query(None),
+    encoding: str = Query('utf-8'),
+    tts: str = Query('summary'),
+    voice: str = Query(DEFAULT_VOICE)
+):
+    markdown_filename = await process_article(bg_tasks, url, title, encoding, tts=tts, voice=voice)
+    return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
+
+@note.post("/note/add")
+async def note_add_endpoint(file: Optional[UploadFile] = File(None), text: Optional[str] = Form(None), source: Optional[str] = Form(None), bg_tasks: BackgroundTasks = None):
+    L.DEBUG(f"Received request on /note/add...")
+    if not file and not text:
+        L.WARN(f"... without any file or text!")
+        raise HTTPException(status_code=400, detail="Either text or a file must be provided")
+    else:
+        result = await process_for_daily_note(file, text, source, bg_tasks)
+        L.INFO(f"Result on /note/add: {result}")
+        return JSONResponse(result, status_code=200)
+
+async def process_for_daily_note(file: Optional[UploadFile] = File(None), text: Optional[str] = None, source: Optional[str] = None, bg_tasks: BackgroundTasks = None):
+    now = dt_datetime.now()
+    transcription_entry = ""
+    file_entry = ""
+    if file:
+        L.DEBUG("File received...")
+        file_content = await file.read()
+        audio_io = BytesIO(file_content)
+        
+        # Improve error handling for file type guessing
+        guessed_type = mimetypes.guess_type(file.filename)
+        file_type = guessed_type[0] if guessed_type[0] else "application/octet-stream"
+        
+        L.DEBUG(f"Processing as {file_type}...")
+        
+        # Extract the main type (e.g., 'audio', 'image', 'video')
+        main_type = file_type.split('/')[0]
+        subdir = main_type.title() if main_type else "Documents"
+        
+        absolute_path, relative_path = assemble_journal_path(now, subdir=subdir, filename=file.filename)
+        L.DEBUG(f"Destination path: {absolute_path}")
+        
+        with open(absolute_path, 'wb') as f:
+            f.write(file_content)
+        L.DEBUG(f"Processing {f.name}...")
+        
+        if main_type == 'audio':
+            transcription_entry = await asr.transcribe_audio(file_path=absolute_path, params=asr.TranscribeParams(model="small-en", language="en", threads=6))
+            file_entry = f"![[{relative_path}]]"
+        elif main_type == 'image':
+            file_entry = f"![[{relative_path}]]"
+        else:
+            file_entry = f"[Source]({relative_path})"
+    
+    text_entry = text if text else ""
+    L.DEBUG(f"transcription: {transcription_entry}\nfile_entry: {file_entry}\ntext_entry: {text_entry}")
+    return await add_to_daily_note(transcription_entry, file_entry, text_entry, now)
+
+async def add_to_daily_note(transcription: str = None, file_link: str = None, additional_text: str = None, date_time: dt_datetime = None):
+    date_time = date_time or dt_datetime.now()
+    note_path, _ = assemble_journal_path(date_time, filename='Notes', extension=".md", no_timestamp = True)
+    time_str = date_time.strftime("%H:%M")
+    
+    entry_lines = []
+    if additional_text and additional_text.strip():
+        entry_lines.append(f"\t* {additional_text.strip()}") 
+    if transcription and transcription.strip():
+        entry_lines.append(f"\t* {transcription.strip()}") 
+    if file_link and file_link.strip():
+        entry_lines.append(f"\t\t {file_link.strip()}")
+
+    entry = f"\n* **{time_str}**\n" + "\n".join(entry_lines)
+
+    # Write the entry to the end of the file
+    if note_path.exists():
+        with open(note_path, 'a', encoding='utf-8') as note_file:
+            note_file.write(entry)
+    else: 
+        date_str = date_time.strftime("%Y-%m-%d")
+        frontmatter = f"""---
+date: {date_str}
+tags:
+ - notes
+---
+
+"""
+        content = frontmatter + entry
+        # If the file doesn't exist, create it and start with "Notes"
+        with open(note_path, 'w', encoding='utf-8') as note_file:
+            note_file.write(content)
+
+    return entry
+
+
+
+async def process_document(
+    bg_tasks: BackgroundTasks,
+    document: File,
+    title: Optional[str] = None,
+    tts_mode: str = "summary",
+    voice: str = DEFAULT_VOICE
+):
+    timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
+
+    # Save the document to OBSIDIAN_RESOURCES_DIR
+    document_content = await document.read()
+    file_path = Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR / document.filename
+    with open(file_path, 'wb') as f:
+        f.write(document_content)
+
+    parsed_content = await llm.extract_text(file_path)  # Ensure extract_text is awaited
+
+    llm_title, summary = await llm.title_and_summary(parsed_content)
+    try:
+        readable_title = sanitize_filename(title if title else document.filename)
+
+        if tts_mode == "full" or tts_mode == "content" or tts_mode == "body":
+            tts_text = parsed_content
+        elif tts_mode == "summary" or tts_mode == "excerpt":
+            tts_text = summary
+        else:
+            tts_text = None
+
+        frontmatter = f"""---
+title: {readable_title}
+added: {timestamp}
+---
+"""
+        body = f"# {readable_title}\n\n"
+
+        if tts_text:
+            try:
+                datetime_str = dt_datetime.now().strftime("%Y%m%d%H%M%S")
+                audio_filename = f"{datetime_str} {readable_title}"
+                audio_path = await tts.generate_speech(
+                    bg_tasks=bg_tasks,
+                    text=tts_text,
+                    voice=voice,
+                    model="eleven_turbo_v2",
+                    podcast=True,
+                    title=audio_filename,
+                    output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR
+                )
+                audio_ext = Path(audio_path).suffix
+                obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
+                body += f"{obsidian_link}\n\n"
+            except Exception as e:
+                L.ERR(f"Failed in the TTS portion of clipping: {e}")
+
+        body += f"> [!summary]+\n"
+        body += f"> {summary}\n\n"
+        body += parsed_content
+        markdown_content = frontmatter + body
+
+        markdown_filename = f"{readable_title}.md"
+        encoding = 'utf-8'
+
+        with open(markdown_filename, 'w', encoding=encoding) as md_file:
+            md_file.write(markdown_content)
+
+        L.INFO(f"Successfully saved to {markdown_filename}")
+
+        return markdown_filename
+
+    except Exception as e:
+        L.ERR(f"Failed to clip: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+
+async def process_article(
+    bg_tasks: BackgroundTasks,
+    parsed_content: Article,
+    tts_mode: str = "summary", 
+    voice: str = DEFAULT_11L_VOICE
+):
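+    # Unlike process_article2 below, this variant receives an already-parsed
+    # newspaper Article instead of fetching and parsing a URL itself.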
+    timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
+
+    readable_title = sanitize_filename(parsed_content.title or timestamp)
+    markdown_filename, relative_path = assemble_journal_path(dt_datetime.now(), subdir="Articles", filename=readable_title, extension=".md")
+
+    try:
+        summary = await llm.summarize_text(parsed_content.clean_doc, "Summarize the provided text. Respond with the summary and nothing else. Do not otherwise acknowledge the request. Just provide the requested summary.")
+        summary = summary.replace('\n', ' ')  # Remove line breaks
+
+        if tts_mode == "full" or tts_mode == "content":
+            tts_text = parsed_content.clean_doc
+        elif tts_mode == "summary" or tts_mode == "excerpt":
+            tts_text = summary
+        else:
+            tts_text = None
+
+        banner_markdown = ''
+        try:
+            banner_url = parsed_content.top_image
+            if banner_url:
+                banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
+                if banner_image:
+                    banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
+                
+        except Exception as e:
+            L.ERR(f"No image found in article")
+
+        authors = ', '.join('[[{}]]'.format(author) for author in parsed_content.authors)
+        published_date = parsed_content.publish_date
+        frontmatter = f"""---
+title: {readable_title}
+authors: {authors}
+published: {published_date}
+added: {timestamp}
+banner: "{banner_markdown}"
+tags:
+
+"""
+        frontmatter += '\n'.join(f" - {tag}" for tag in parsed_content.tags)
+        frontmatter += '\n---\n'
+
+        body = f"# {readable_title}\n\n"
+        if tts_text:
+            audio_filename = f"{published_date} {readable_title}"
+            try:
+                audio_path = await tts.generate_speech(bg_tasks=bg_tasks, text=tts_text, voice=voice, model="eleven_turbo_v2", podcast=True, title=audio_filename,
+                output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
+                audio_ext = Path(audio_path).suffix
+                obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
+                body += f"{obsidian_link}\n\n"
+            except Exception as e:
+                L.ERR(f"Failed to generate TTS for np3k. {e}")
+
+        try:
+            body += f"by {authors} in {parsed_content.canonical_link}" # update with method for getting the newspaper name
+            body += f"> [!summary]+\n"
+            body += f"> {summary}\n\n"
+            body += parsed_content["content"]
+            markdown_content = frontmatter + body
+
+        except Exception as e:
+            L.ERR(f"Failed to combine elements of article markdown.")
+
+        try:
+            with open(markdown_filename, 'w', encoding='utf-8') as md_file:
+                md_file.write(markdown_content)
+
+            L.INFO(f"Successfully saved to {markdown_filename}")
+            # TODO: invoke add_to_daily_note here; a bare name reference does not call the function
+            return markdown_filename
+        
+        except Exception as e:
+            L.ERR(f"Failed to write markdown file")
+            raise HTTPException(status_code=500, detail=str(e))
+        
+    except Exception as e:
+        L.ERR(f"Failed to clip: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+async def process_article2(
+    bg_tasks: BackgroundTasks,
+    url: str,
+    title: Optional[str] = None,
+    encoding: str = 'utf-8',
+    source: Optional[str] = None,
+    tts_mode: str = "summary", 
+    voice: str = DEFAULT_11L_VOICE
+):
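+    """
+    Fetch, parse, and render an article from a URL as a Markdown note.
+    Like process_article, but handles retrieval itself: it delegates to
+    parse_article, then assembles frontmatter, summary, optional TTS
+    audio, and body from the parsed Article.
+    """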
+
+    timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
+
+    parsed_content = await parse_article(url, source)
+    if parsed_content is None:
+        return {"error": "Failed to retrieve content"}
+
+    readable_title = sanitize_filename(title or parsed_content.get("title") or timestamp)
+    markdown_filename, relative_path = assemble_journal_path(dt_datetime.now(), subdir="Articles", filename=readable_title, extension=".md")
+
+    try:
+        summary = await llm.summarize_text(parsed_content["content"], "Summarize the provided text. Respond with the summary and nothing else. Do not otherwise acknowledge the request. Just provide the requested summary.")
+        summary = summary.replace('\n', ' ')  # Remove line breaks
+
+        if tts_mode == "full" or tts_mode == "content":
+            tts_text = parsed_content["content"]
+        elif tts_mode == "summary" or tts_mode == "excerpt":
+            tts_text = summary
+        else:
+            tts_text = None
+
+        banner_markdown = ''
+        try:
+            banner_url = parsed_content.top_image
+            if banner_url:
+                banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
+                if banner_image:
+                    banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
+
+        except Exception as e:
+            L.ERR(f"Unable to download banner image: {e}")
+
+        authors = ', '.join('[[{}]]'.format(author) for author in (parsed_content.authors or ['Unknown']))
+
+        frontmatter = f"""---
+title: {readable_title}
+authors: {', '.join('[[{}]]'.format(author) for author in parsed_content.get('authors', ['Unknown']))}
+published: {parsed_content.get('date_published', 'Unknown')}
+added: {timestamp}
+excerpt: {parsed_content.get('excerpt', '')}
+banner: "{banner_markdown}"
+tags:
+
+"""
+        frontmatter += '\n'.join(f" - {tag}" for tag in parsed_content.get('tags', []))
+        frontmatter += '\n---\n'
+
+        body = f"# {readable_title}\n\n"
+
+        if tts_text:
+            datetime_str = dt_datetime.now().strftime("%Y%m%d%H%M%S")
+            audio_filename = f"{datetime_str} {readable_title}"
+            try:
+                audio_path = await tts.generate_speech(bg_tasks=bg_tasks, text=tts_text, voice=voice, model="eleven_turbo_v2", podcast=True, title=audio_filename,
+                output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
+                audio_ext = Path(audio_path).suffix
+                obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
+                body += f"{obsidian_link}\n\n"
+            except Exception as e:
+                L.ERR(f"Failed to generate TTS for np3k. {e}")
+
+        try:
+            body += f"by {authors} in [{parsed_content.source_url or urlparse(url).netloc.replace('www.', '')}]({url}).\n\n"
+            body += f"> [!summary]+\n"
+            body += f"> {summary}\n\n"
+            body += parsed_content.text
+            markdown_content = frontmatter + body
+
+        except Exception as e:
+            L.ERR(f"Failed to combine elements of article markdown: {e}")
+            raise
+
+        try:
+            with open(markdown_filename, 'w', encoding=encoding) as md_file:
+                md_file.write(markdown_content)
+
+            L.INFO(f"Successfully saved to {markdown_filename}")
+            # TODO: invoke add_to_daily_note here; a bare name reference does not call the function
+            return markdown_filename
+        
+        except Exception as e:
+            L.ERR(f"Failed to write markdown file")
+            raise HTTPException(status_code=500, detail=str(e))
+        
+    except Exception as e:
+        L.ERR(f"Failed to clip {url}: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+async def parse_article(url: str, source: Optional[str] = None) -> Article:
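+    """
+    Parse an article with newspaper3k, backfilling any missing fields (title,
+    authors, publish date, description, body text, image, keywords) from
+    trafilatura metadata, and extract the body as Markdown. Returns the
+    enriched Article, with convenience copies in article.additional_data.
+    """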
+    source = source if source else trafilatura.fetch_url(url)
+    traf = trafilatura.extract_metadata(filecontent=source, default_url=url)
+
+    # Create and parse the newspaper3k Article
+    article = Article(url)
+    article.set_html(source)
+    article.parse()
+
+    L.INFO(f"Parsed {article.title}")
+
+    # Update or set properties based on trafilatura and additional processing
+    article.title = article.title or traf.title or url
+    article.authors = article.authors or (traf.author if isinstance(traf.author, list) else ([traf.author] if traf.author else []))
+    
+    article.publish_date = article.publish_date or traf.date
+    try:
+        article.publish_date = await loc.dt(article.publish_date, "UTC")
+    except Exception as e:
+        L.DEBUG(f"Failed to localize {article.publish_date}: {e}")
+        article.publish_date = await loc.dt(dt_datetime.now(), "UTC")
+
+    article.meta_description = article.meta_description or traf.description
+    article.text = trafilatura.extract(source, output_format="markdown", include_comments=False) or article.text
+    article.top_image = article.top_image or traf.image
+    article.source_url = traf.sitename or urlparse(url).netloc.replace('www.', '').title()
+    article.meta_keywords = article.meta_keywords or traf.categories or traf.tags
+    article.meta_keywords = article.meta_keywords if isinstance(article.meta_keywords, list) else ([article.meta_keywords] if article.meta_keywords else [])
+
+    # Set additional data in the additional_data dictionary
+    article.additional_data = {
+        'excerpt': article.meta_description,
+        'domain': article.source_url,
+        'tags': article.meta_keywords,
+        'content': article.text  # Store the markdown content here
+    }
+
+    return article
+
+
+async def html_to_markdown(url: str = None, source: str = None) -> Optional[str]:
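+    """
+    Convert a web page to Markdown: fetch the URL (unless source HTML is
+    given), isolate the main content with readability, strip scripts and
+    styles with BeautifulSoup, and convert what remains to ATX-headed
+    Markdown.
+    """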
+    if source:
+        html_content = source
+    elif url:
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url) as response:
+                html_content = await response.text()
+    else:
+        L.ERR(f"Unable to convert nothing to markdown.")
+        return None
+
+    # Use readability to extract the main content
+    doc = Document(html_content)
+    cleaned_html = doc.summary()
+
+    # Parse the cleaned HTML with BeautifulSoup for any additional processing
+    soup = BeautifulSoup(cleaned_html, 'html.parser')
+
+    # Remove any remaining unwanted elements
+    for element in soup(['script', 'style']):
+        element.decompose()
+
+    # Convert to markdown
+    markdown_content = md(str(soup), heading_style="ATX")
+
+    return markdown_content
+
+
+async def process_archive(
+    url: str,
+    title: Optional[str] = None,
+    encoding: str = 'utf-8',
+    source: Optional[str] = None,
+) -> Path:
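+    """
+    Archive a web page as Markdown: convert it with html_to_markdown, prepend
+    minimal frontmatter (title, timestamps, source URL), and write the result
+    under the path from assemble_archive_path. Returns the path written, or
+    None if the write fails.
+    """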
+    timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
+    readable_title = title if title else f"{url} - {timestamp}"
+    
+    content = await html_to_markdown(url, source)
+    if content is None:
+        raise HTTPException(status_code=400, detail="Failed to convert content to markdown")
+    
+    markdown_path, relative_path = assemble_archive_path(readable_title, ".md")
+    
+    markdown_content = f"---\n"
+    markdown_content += f"title: {readable_title}\n"
+    markdown_content += f"added: {timestamp}\n"
+    markdown_content += f"url: {url}"
+    markdown_content += f"date: {dt_datetime.now().strftime('%Y-%m-%d')}"
+    markdown_content += f"---\n\n"
+    markdown_content += f"# {readable_title}\n\n"
+    markdown_content += f"Clipped from [{url}]({url}) on {timestamp}"
+    markdown_content += content
+    
+    try:
+        markdown_path.parent.mkdir(parents=True, exist_ok=True)
+        with open(markdown_path, 'w', encoding=encoding) as md_file:
+            md_file.write(markdown_content)
+        L.DEBUG(f"Successfully saved to {markdown_path}")
+        return markdown_path
+    except Exception as e:
+        L.WARN(f"Failed to write markdown file: {str(e)}")
+        return None
+
+def download_file(url, folder):
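+    """
+    Download an image from url into folder under a random UUID filename that
+    keeps the original extension. Returns the filename on success, or None if
+    the response is not an image or the request fails.
+    """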
+    os.makedirs(folder, exist_ok=True)
+    filename = str(uuid.uuid4()) + os.path.splitext(urlparse(url).path)[-1]
+    filepath = os.path.join(folder, filename)
+    
+    session = requests.Session()
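+    # Retry transient upstream errors (502/503/504) up to five times with exponential backoff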
+    retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
+    session.mount('http://', HTTPAdapter(max_retries=retries))
+    session.mount('https://', HTTPAdapter(max_retries=retries))
+
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
+    }
+
+    try:
+        response = session.get(url, headers=headers, timeout=10)
+        if response.status_code == 200:
+            if 'image' in response.headers.get('Content-Type', ''):
+                with open(filepath, 'wb') as f:
+                    f.write(response.content)
+            else:
+                L.ERR(f"Failed to download image: {url}, invalid content type: {response.headers.get('Content-Type')}")
+                return None
+        else:
+            L.ERR(f"Failed to download image: {url}, status code: {response.status_code}")
+            return None
+    except Exception as e:
+        L.ERR(f"Failed to download image: {url}, error: {str(e)}")
+        return None
+    return filename
+
+def copy_file(local_path, folder):
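+    """Copy a local file into folder, keeping its basename; returns the filename."""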
+    os.makedirs(folder, exist_ok=True)
+    filename = os.path.basename(local_path)
+    destination_path = os.path.join(folder, filename)
+    shutil.copy(local_path, destination_path)
+    return filename
+
+
+async def save_file(file: UploadFile, folder: Path) -> Path:
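+    """Persist an uploaded file to folder under a timestamp-prefixed filename and return the path."""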
+    file_path = folder / f"{dt_datetime.now().strftime('%Y%m%d_%H%M%S')}_{file.filename}"
+    with open(file_path, 'wb') as f:
+        shutil.copyfileobj(file.file, f)
+    return file_path
+
diff --git a/sijapi/routers/serve.py b/sijapi/routers/serve.py
index eea8725..3c88a8d 100644
--- a/sijapi/routers/serve.py
+++ b/sijapi/routers/serve.py
@@ -33,7 +33,7 @@ from sijapi import (
     MAC_UN, MAC_PW, MAC_ID, TS_TAILNET, DATA_DIR, SD_IMAGE_DIR, PUBLIC_KEY, OBSIDIAN_VAULT_DIR
 )
 from sijapi.utilities import bool_convert, sanitize_filename, assemble_journal_path
-from sijapi.routers import note, locate
+from sijapi.routers import loc, note
 
 
 serve = APIRouter(tags=["public"])
@@ -69,7 +69,7 @@ def is_valid_date(date_str: str) -> bool:
 @serve.get("/notes/{file_path:path}")
 async def get_file_endpoint(file_path: str):
     try:
-        date_time = await locate.localize_datetime(file_path);
+        date_time = await loc.dt(file_path)
         absolute_path, local_path = assemble_journal_path(date_time, no_timestamp = True)
     except ValueError as e:
         L.DEBUG(f"Unable to parse {file_path} as a date, now trying to use it as a local path")
diff --git a/sijapi/routers/weather.py b/sijapi/routers/weather.py
index 6f2ab5d..a9068ca 100644
--- a/sijapi/routers/weather.py
+++ b/sijapi/routers/weather.py
@@ -10,9 +10,9 @@ from typing import Dict
 from datetime import datetime
 from shapely.wkb import loads
 from binascii import unhexlify
-from sijapi import L, VISUALCROSSING_API_KEY, TZ, DB
+from sijapi import L, VISUALCROSSING_API_KEY, TZ, DB, GEO
 from sijapi.utilities import haversine
-from sijapi.routers import locate
+from sijapi.routers import loc
 
 weather = APIRouter()
 
@@ -20,19 +20,19 @@ weather = APIRouter()
 async def get_weather(date_time: datetime, latitude: float, longitude: float):
     # request_date_str = date_time.strftime("%Y-%m-%d")
     L.DEBUG(f"Called get_weather with lat: {latitude}, lon: {longitude}, date_time: {date_time}")
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in get_weather.")
     daily_weather_data = await get_weather_from_db(date_time, latitude, longitude)
     fetch_new_data = True
     if daily_weather_data:
         try:
             L.DEBUG(f"Daily weather data from db: {daily_weather_data}")
             last_updated = str(daily_weather_data['DailyWeather'].get('last_updated'))
-            last_updated = await locate.localize_datetime(last_updated)
+            last_updated = await loc.dt(last_updated)
             stored_loc_data = unhexlify(daily_weather_data['DailyWeather'].get('location'))
             stored_loc = loads(stored_loc_data)
             stored_lat = stored_loc.y
             stored_lon = stored_loc.x
             stored_ele = stored_loc.z
-
             
             hourly_weather = daily_weather_data.get('HourlyWeather')
 
@@ -53,6 +53,7 @@ async def get_weather(date_time: datetime, latitude: float, longitude: float):
     if fetch_new_data:
         L.DEBUG(f"We require new data!")
         request_date_str = date_time.strftime("%Y-%m-%d")
+        L.WARN(f"Using {date_time.strftime('%Y-%m-%d')} as our datetime for fetching new data.")
         url = f"https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{latitude},{longitude}/{request_date_str}/{request_date_str}?unitGroup=us&key={VISUALCROSSING_API_KEY}"
         try:
             async with AsyncClient() as client:
@@ -85,6 +86,7 @@ async def get_weather(date_time: datetime, latitude: float, longitude: float):
 
 
 async def store_weather_to_db(date_time: datetime, weather_data: dict):
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in store_weather_to_db")
     async with DB.get_connection() as conn:
         try:
             day_data = weather_data.get('days')[0]
@@ -95,16 +97,21 @@ async def store_weather_to_db(date_time: datetime, weather_data: dict):
             stations_array = day_data.get('stations', []) or []
 
             date_str = date_time.strftime("%Y-%m-%d")
+            L.WARN(f"Using {date_str} in our query in store_weather_to_db.")
 
             # Get location details from weather data if available
             longitude = weather_data.get('longitude')
             latitude = weather_data.get('latitude')
-            elevation = await locate.get_elevation(latitude, longitude) # 152.4  # default until we add a geocoder that can look up actual elevation; weather_data.get('elevation')  # assuming 'elevation' key, replace if different
+            elevation = await GEO.elevation(latitude, longitude)
             location_point = f"POINTZ({longitude} {latitude} {elevation})" if longitude and latitude and elevation else None
 
             # Correct for the datetime objects 
-            day_data['datetime'] = await locate.localize_datetime(day_data.get('datetime')) #day_data.get('datetime'))
+            L.WARN(f"Uncorrected datetime in store_weather_to_db: {day_data['datetime']}")
+            day_data['datetime'] = await loc.dt(day_data.get('datetime')) #day_data.get('datetime'))
+            L.WARN(f"Corrected datetime in store_weather_to_db with localized datetime: {day_data['datetime']}")
+            L.WARN(f"Uncorrected sunrise time in store_weather_to_db: {day_data['sunrise']}")
             day_data['sunrise'] = day_data['datetime'].replace(hour=int(day_data.get('sunrise').split(':')[0]), minute=int(day_data.get('sunrise').split(':')[1]))
+            L.WARN(f"Corrected sunrise time in store_weather_to_db with localized datetime: {day_data['sunrise']}")
             day_data['sunset'] = day_data['datetime'].replace(hour=int(day_data.get('sunset').split(':')[0]), minute=int(day_data.get('sunset').split(':')[1])) 
 
             daily_weather_params = (
@@ -160,7 +167,7 @@ async def store_weather_to_db(date_time: datetime, weather_data: dict):
                         await asyncio.sleep(0.1)
                     #    hour_data['datetime'] = parse_date(hour_data.get('datetime'))
                         hour_timestamp = date_str + ' ' + hour_data['datetime']
-                        hour_data['datetime'] = await locate.localize_datetime(hour_timestamp)
+                        hour_data['datetime'] = await loc.dt(hour_timestamp)
                         L.DEBUG(f"Processing hours now...")
                         # L.DEBUG(f"Processing {hour_data['datetime']}")
 
@@ -226,6 +233,7 @@ async def store_weather_to_db(date_time: datetime, weather_data: dict):
 
 
 async def get_weather_from_db(date_time: datetime, latitude: float, longitude: float):
+    L.WARN(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in get_weather_from_db.")
     async with DB.get_connection() as conn:
         query_date = date_time.date()
         try:
@@ -238,25 +246,38 @@ async def get_weather_from_db(date_time: datetime, latitude: float, longitude: f
                 LIMIT 1
             '''
 
-            daily_weather_data = await conn.fetchrow(query, query_date, longitude, latitude, longitude, latitude)
+
+            daily_weather_record = await conn.fetchrow(query, query_date, longitude, latitude, longitude, latitude)
 
-            if daily_weather_data is None:
+            if daily_weather_record is None:
                 L.DEBUG(f"No daily weather data retrieved from database.")
                 return None
-            # else:
-                # L.DEBUG(f"Daily_weather_data: {daily_weather_data}")
+
+            # Convert asyncpg.Record to a mutable dictionary
+            daily_weather_data = dict(daily_weather_record)
+
+            # Now we can modify the dictionary
+            daily_weather_data['datetime'] = await loc.dt(daily_weather_data.get('datetime'))
+
             # Query to get hourly weather data
             query = '''
                 SELECT HW.* FROM HourlyWeather HW
                 WHERE HW.daily_weather_id = $1
             '''
-            hourly_weather_data = await conn.fetch(query, daily_weather_data['id'])
+
+            hourly_weather_records = await conn.fetch(query, daily_weather_data['id'])
+            
+            hourly_weather_data = []
+            for record in hourly_weather_records:
+                hour_data = dict(record)
+                hour_data['datetime'] = await loc.dt(hour_data.get('datetime'))
+                hourly_weather_data.append(hour_data)
 
-            day: Dict = {
-                'DailyWeather': dict(daily_weather_data),
-                'HourlyWeather': [dict(row) for row in hourly_weather_data],
+            day = {
+                'DailyWeather': daily_weather_data,
+                'HourlyWeather': hourly_weather_data,
             }
-            # L.DEBUG(f"day: {day}")
+            L.DEBUG(f"day: {day}")
             return day
         except Exception as e:
             L.ERR(f"Unexpected error occurred: {e}")