class DocumentRefresher
A class that forces the reMarkable web app to refresh and display documents by programmatically moving them between folders, triggering synchronization.
/tf/active/vicechatdev/e-ink-llm/cloudtest/force_web_app_refresh.py
49 - 292
complex
Purpose
DocumentRefresher interacts with the reMarkable Cloud API to manipulate document metadata and folder structure. Its primary use case is to force the web application to recognize and display newly uploaded or modified documents by moving them to the root folder and back to their original location. This triggers the web app's synchronization mechanism, making documents immediately visible without manual refresh. The class handles authentication, retrieves document schemas, updates metadata, and manages the complex process of updating the reMarkable cloud storage hierarchy.
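The root.docSchema index that the class rewrites is a line-oriented listing; based on the parsing in find_document_in_root below, each entry is a colon-separated record whose first field is the component hash. A minimal parsing sketch (the sample line is illustrative, not real data, and the field labels follow the class's own interpretation):
# Hedged sketch: shape of a root.docSchema entry as this class interprets it
sample_line = "<component-hash>:<field1>:<document-uuid>:<type>:<size>"
parts = sample_line.split(':')
entry = {'hash': parts[0], 'uuid': parts[2], 'type': parts[3], 'size': parts[4]}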
Source Code
class DocumentRefresher:
    """Force web app refresh by moving document"""

    def __init__(self):
        # Load auth session
        auth = RemarkableAuth()
        self.session = auth.get_authenticated_session()
        if not self.session:
            raise RuntimeError("Failed to authenticate with reMarkable")
        print("Document Refresher Initialized")

    def get_current_root_info(self):
        """Get current root.docSchema info"""
        root_response = self.session.get("https://eu.tectonic.remarkable.com/sync/v4/root")
        root_response.raise_for_status()
        root_data = root_response.json()

        # Get root content
        root_content_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{root_data['hash']}")
        root_content_response.raise_for_status()
        root_content = root_content_response.text

        return root_data, root_content

    def find_document_in_root(self, doc_uuid: str, root_content: str):
        """Find document entry in root.docSchema"""
        lines = root_content.strip().split('\n')
        for line in lines[1:]:  # Skip version header
            if doc_uuid in line:
                parts = line.split(':')
                if len(parts) >= 5:
                    return {
                        'hash': parts[0],
                        'uuid': parts[2],
                        'type': parts[3],
                        'size': parts[4],
                        'full_line': line
                    }
        raise ValueError(f"Document {doc_uuid} not found in root.docSchema")

    def get_document_metadata(self, doc_hash: str):
        """Get document metadata"""
        # Get document schema
        doc_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{doc_hash}")
        doc_response.raise_for_status()
        doc_content = doc_response.text
        doc_lines = doc_content.strip().split('\n')

        # Find metadata hash
        metadata_hash = None
        metadata_line = None
        for line in doc_lines[1:]:
            if ':' in line and '.metadata' in line:
                parts = line.split(':')
                if len(parts) >= 5:
                    metadata_hash = parts[0]
                    metadata_line = line
                    break
        if not metadata_hash:
            raise ValueError("Metadata component not found")

        # Fetch metadata
        metadata_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{metadata_hash}")
        metadata_response.raise_for_status()
        current_metadata = json.loads(metadata_response.text)

        return current_metadata, doc_lines, metadata_line

    def update_document_parent(self, doc_uuid: str, new_parent: str, description: str):
        """Update document parent and upload changes"""
        print(f"\n{description}")

        # Get current state
        root_data, root_content = self.get_current_root_info()
        doc_info = self.find_document_in_root(doc_uuid, root_content)
        current_metadata, doc_lines, metadata_line = self.get_document_metadata(doc_info['hash'])

        print(f" Current parent: {current_metadata.get('parent', '(root)')}")
        print(f" New parent: {new_parent or '(root)'}")

        # Update metadata
        updated_metadata = current_metadata.copy()
        updated_metadata['parent'] = new_parent
        updated_metadata['lastModified'] = int(time.time() * 1000)
        updated_metadata['metadatamodified'] = True
        updated_metadata['modified'] = True

        # Upload new metadata
        metadata_json = json.dumps(updated_metadata, separators=(',', ':'))
        metadata_hash = hashlib.sha256(metadata_json.encode()).hexdigest()
        headers = {
            'Content-Type': 'application/octet-stream',
            'rm-batch-number': '1',
            'rm-filename': f'{doc_uuid}.metadata',
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }
        crc32c_header = compute_crc32c_header(metadata_json.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header
        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{metadata_hash}",
            data=metadata_json.encode(),
            headers=headers
        )
        upload_response.raise_for_status()

        # Update document schema
        new_doc_lines = []
        for line in doc_lines:
            if line == metadata_line:
                parts = line.split(':')
                parts[0] = metadata_hash
                new_doc_lines.append(':'.join(parts))
            else:
                new_doc_lines.append(line)
        new_doc_content = '\n'.join(new_doc_lines)
        doc_hash = hashlib.sha256(new_doc_content.encode()).hexdigest()
        headers = {
            'Content-Type': 'application/octet-stream',
            'rm-batch-number': '1',
            'rm-filename': f'{doc_uuid}.docSchema',
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }
        crc32c_header = compute_crc32c_header(new_doc_content.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header
        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{doc_hash}",
            data=new_doc_content.encode(),
            headers=headers
        )
        upload_response.raise_for_status()

        # Update root.docSchema
        old_line = doc_info['full_line']
        parts = old_line.split(':')
        parts[0] = doc_hash
        new_line = ':'.join(parts)
        new_root_content = root_content.replace(old_line, new_line)

        # Upload new root
        root_hash = hashlib.sha256(new_root_content.encode()).hexdigest()
        headers = {
            'Content-Type': 'text/plain',
            'rm-batch-number': '1',
            'rm-filename': 'root.docSchema',
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }
        crc32c_header = compute_crc32c_header(new_root_content.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header
        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{root_hash}",
            data=new_root_content.encode(),
            headers=headers
        )
        upload_response.raise_for_status()

        # Update root hash pointer
        root_update_data = {
            "broadcast": True,
            "generation": root_data['generation'],
            "hash": root_hash
        }
        root_content_body = json.dumps(root_update_data, indent=2).encode('utf-8')
        headers = {
            'Content-Type': 'application/json',
            'rm-batch-number': '1',
            'rm-filename': 'roothash',
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }
        crc32c_header = compute_crc32c_header(root_content_body)
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header
        root_update_response = self.session.put(
            "https://eu.tectonic.remarkable.com/sync/v3/root",
            data=root_content_body,
            headers=headers
        )
        root_update_response.raise_for_status()

        print(f" {description} completed successfully")
        return True

    def force_refresh_document(self, doc_uuid: str):
        """Force web app refresh by moving document around"""
        print("Force Refreshing Document Visibility")
        print(f"Document UUID: {doc_uuid}")
        print("=" * 60)
        try:
            # Step 1: Move document to root folder (this should make it visible immediately)
            self.update_document_parent(doc_uuid, "", "Moving document to root folder")

            # Step 2: Wait a moment to let web app sync
            print("\nWaiting 3 seconds for web app sync...")
            time.sleep(3)

            # Step 3: Move document back to gpt_in folder
            gpt_in_uuid = "99c6551f-2855-44cf-a4e4-c9c586558f42"
            self.update_document_parent(doc_uuid, gpt_in_uuid, "Moving document back to gpt_in folder")

            print("\nDocument refresh sequence completed!")
            print("The document should now be visible in the gpt_in folder in the web app.")
            print("If still not visible, try refreshing the web browser page.")
            return True
        except Exception as e:
            print(f"\nRefresh operation failed: {e}")
            return False
Parameters
| Name | Type | Default | Kind |
|---|---|---|---|
| bases | - | - | - |
Parameter Details
__init__: The constructor takes no parameters. It automatically initializes authentication with the reMarkable service using RemarkableAuth, creates an authenticated session, and validates that authentication succeeded. Raises RuntimeError if authentication fails.
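A minimal construction sketch (hypothetical usage; assumes RemarkableAuth is importable as listed under Required Imports below):
# Hedged sketch: instantiate the refresher and surface authentication failures cleanly
try:
    refresher = DocumentRefresher()
except RuntimeError as exc:
    # Raised when RemarkableAuth cannot produce an authenticated session
    print(f"Authentication failed: {exc}")
    raise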
Return Value
The class instantiation returns a DocumentRefresher object with an authenticated session. Key method returns:
- get_current_root_info() returns a tuple of (root_data dict, root_content string)
- find_document_in_root() returns a dict with document info (hash, uuid, type, size, full_line)
- get_document_metadata() returns a tuple of (metadata dict, doc_lines list, metadata_line string)
- update_document_parent() returns True on success
- force_refresh_document() returns True on success, False on failure
Class Interface
Methods
__init__(self)
Purpose: Initialize the DocumentRefresher with authenticated session to reMarkable Cloud API
Returns: None - initializes instance with authenticated session
get_current_root_info(self) -> tuple[dict, str]
Purpose: Retrieve the current root.docSchema information including generation and hash
Returns: Tuple of (root_data dict containing generation/hash, root_content string with document schema)
find_document_in_root(self, doc_uuid: str, root_content: str) -> dict
Purpose: Locate a specific document entry within the root.docSchema content
Parameters:
doc_uuid: UUID string of the document to find
root_content: String content of root.docSchema file
Returns: Dict with keys: hash, uuid, type, size, full_line containing document information. Raises ValueError if document not found.
get_document_metadata(self, doc_hash: str) -> tuple[dict, list, str]
Purpose: Fetch and parse document metadata from the cloud storage
Parameters:
doc_hash: SHA256 hash string identifying the document schema
Returns: Tuple of (metadata dict with document properties, doc_lines list of schema lines, metadata_line string). Raises ValueError if metadata not found.
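A brief sketch of consuming this return value (assumes refresher and doc_uuid are already defined):
# Hedged sketch: resolve a document's schema hash, then unpack its metadata tuple
root_data, root_content = refresher.get_current_root_info()
doc_info = refresher.find_document_in_root(doc_uuid, root_content)
metadata, doc_lines, metadata_line = refresher.get_document_metadata(doc_info['hash'])
print(metadata.get('parent'), metadata.get('lastModified'))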
update_document_parent(self, doc_uuid: str, new_parent: str, description: str) -> bool
Purpose: Update a document's parent folder by modifying metadata and uploading changes to cloud
Parameters:
doc_uuid: UUID string of the document to move
new_parent: UUID string of the new parent folder, or empty string for root
description: Human-readable description of the operation for logging
Returns: True on successful update. Raises exceptions on API failures.
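As an illustration of the empty-string-for-root convention (a sketch; assumes refresher and doc_uuid are already defined):
# Hedged sketch: move a document to the top level of My files
if refresher.update_document_parent(doc_uuid, "", "Moving document to root folder"):
    print("Document is now at the top level")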
force_refresh_document(self, doc_uuid: str) -> bool
Purpose: Force the web app to refresh and display a document by moving it to root and back to gpt_in folder
Parameters:
doc_uuid: UUID string of the document to refresh
Returns: True if refresh sequence completed successfully, False if any error occurred
Attributes
| Name | Type | Description | Scope |
|---|---|---|---|
| session | requests.Session | Authenticated HTTP session object for making API requests to reMarkable Cloud, obtained from RemarkableAuth | instance |
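Because session is a standard requests.Session, it can also be used directly for ad-hoc calls against the same endpoints the class uses (a sketch, assuming single-threaded use as noted under Best Practices):
# Hedged sketch: reuse the authenticated session for a direct root lookup
resp = refresher.session.get("https://eu.tectonic.remarkable.com/sync/v4/root")
resp.raise_for_status()
print(resp.json()['generation'])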
Dependencies
json, time, hashlib, uuid, base64, zlib, pathlib, crc32c
Required Imports
import json
import time
import hashlib
import uuid
import base64
import zlib
from pathlib import Path
from auth import RemarkableAuth
import crc32c
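compute_crc32c_header is called by the class but is not part of it; it appears to be a module-level helper elsewhere in force_web_app_refresh.py. The sketch below is an assumption about its behavior (a base64-encoded big-endian CRC32C digest for the x-goog-hash header), not the actual source:
# Hedged sketch of the module-level helper used for the x-goog-hash header.
# Assumption: returns 'crc32c=<base64 digest>' or None if the checksum cannot be computed.
def compute_crc32c_header(data: bytes):
    try:
        checksum = crc32c.crc32c(data)  # unsigned 32-bit CRC32C of the payload
    except Exception:
        return None
    digest = base64.b64encode(checksum.to_bytes(4, 'big')).decode('ascii')
    return f"crc32c={digest}"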
Usage Example
# Initialize the refresher
refresher = DocumentRefresher()
# Force refresh a specific document by UUID
doc_uuid = 'abc123-def456-ghi789'
success = refresher.force_refresh_document(doc_uuid)
if success:
    print('Document is now visible in web app')
else:
    print('Refresh failed')
# Manually move a document to a different parent folder
parent_folder_uuid = '99c6551f-2855-44cf-a4e4-c9c586558f42'
refresher.update_document_parent(doc_uuid, parent_folder_uuid, 'Moving to target folder')
# Get current root information
root_data, root_content = refresher.get_current_root_info()
print(f'Root generation: {root_data["generation"]}')
# Find a document in the root schema
doc_info = refresher.find_document_in_root(doc_uuid, root_content)
print(f'Document hash: {doc_info["hash"]}')
Best Practices
- Always instantiate the class in a try-except block to handle authentication failures gracefully
- The force_refresh_document method includes a 3-second sleep to allow web app synchronization - do not remove this delay
- Document UUIDs must be valid and exist in the reMarkable cloud before calling methods
- The class modifies cloud state - ensure you have backups or understand the implications of moving documents
- Network errors can occur at multiple stages - wrap method calls in try-except blocks for production use
- The update_document_parent method performs multiple API calls sequentially - it is not atomic and can leave inconsistent state if interrupted
- The hardcoded gpt_in folder UUID (99c6551f-2855-44cf-a4e4-c9c586558f42) in force_refresh_document should be parameterized for reusability (see the sketch after this list)
- Session object is reused across methods - the class maintains state and should not be shared across threads without synchronization
- All API calls use the EU endpoint (eu.tectonic.remarkable.com) - may need adjustment for other regions
- The class prints status messages to stdout - redirect or capture output if running in a service context
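A minimal sketch combining several of these points: the target folder UUID is passed in rather than hardcoded, and the sequence is wrapped in error handling. The wrapper name, parameters, and folder UUID are illustrative, not part of the class:
# Hedged sketch: a reusable wrapper around the move-to-root-and-back refresh sequence
def refresh_document_visibility(refresher, doc_uuid, target_folder_uuid, settle_seconds=3):
    try:
        refresher.update_document_parent(doc_uuid, "", "Moving document to root folder")
        time.sleep(settle_seconds)  # let the web app pick up the change
        refresher.update_document_parent(doc_uuid, target_folder_uuid,
                                         "Moving document back to target folder")
        return True
    except Exception as exc:
        # Not atomic: the document may be left in the root folder if the second move fails
        print(f"Refresh failed: {exc}")
        return False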
Tags
Similar Components
AI-powered semantic similarity - components with related functionality:
- function main_v83 (73.3% similar)
- class RemarkableReplicaSync (68.8% similar)
- class DocumentToTrashMover (68.4% similar)
- class RootCleaner (66.1% similar)
- class RemarkableAPIClient (65.9% similar)