import json
import os
import uuid

# ===== CONFIGURATION =====
SOURCE_FOLDER = 'chatgpt123'  # <-- Your extracted ChatGPT export folder
CONVERSATIONS_FILE = 'conversations.json'  # <-- The actual JSON file inside the folder
OUTPUT_JSON_FILE = 'openwebui_import.json'  # <-- Output file to import into OpenWebUI
# ==========================

def load_conversations(source_folder):
    convos_path = os.path.join(source_folder, CONVERSATIONS_FILE)
    with open(convos_path, 'r', encoding='utf-8') as f:
        return json.load(f)
def convert_to_openwebui_format(chat_data):
    conversations = []

    # ChatGPT exports conversations.json as a *list* of conversation objects;
    # also accept a single conversation dict just in case.
    if isinstance(chat_data, dict):
        chat_data = [chat_data]

    for convo in chat_data:
        # Check for the 'mapping' structure
        mapping = convo.get("mapping")
        if not mapping:
            print("Skipping conversation without a valid 'mapping' structure.")
            continue

        # Iterate over the mapping to extract messages
        messages = []
        for entry in mapping.values():
            msg = entry.get('message')
            if not msg:  # root/system nodes carry a null message
                continue
            role = msg['author']['role']
            parts = msg.get('content', {}).get('parts', [])
            # Only keep non-empty text parts
            if parts and isinstance(parts[0], str) and parts[0].strip():
                messages.append({"role": role, "content": parts[0]})

        if messages:
            conversations.append({
                "id": str(uuid.uuid4()),  # Generate a unique ID for each conversation
                "messages": messages,
            })

    return {"conversations": conversations}
def main():
    print(f"Loading conversations from {SOURCE_FOLDER}/{CONVERSATIONS_FILE}...")
    chat_data = load_conversations(SOURCE_FOLDER)

    print("Converting to OpenWebUI format...")
    openwebui_data = convert_to_openwebui_format(chat_data)

    # Write the converted data out as a JSON file
    print(f"Saving as {OUTPUT_JSON_FILE}...")
    with open(OUTPUT_JSON_FILE, 'w', encoding='utf-8') as f:
        json.dump(openwebui_data, f, indent=2)

    print(f"Done! You can now import '{OUTPUT_JSON_FILE}' into OpenWebUI.")

if __name__ == "__main__":
    main()
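For reference, here's a trimmed sketch of the structure the converter walks. The field names follow ChatGPT's export format; the node IDs and text below are made up:

# A trimmed, hypothetical example of one conversation from conversations.json.
# Field names follow ChatGPT's export format; IDs and text are invented.
sample_conversation = {
    "title": "Example chat",
    "mapping": {
        "node-1": {
            "message": None,  # root nodes carry no message
            "children": ["node-2"],
        },
        "node-2": {
            "message": {
                "author": {"role": "user"},
                "content": {"content_type": "text", "parts": ["Hello!"]},
            },
            "children": [],
        },
    },
}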
I'm putting the extremely aged Dell server to work again. OpenWebUI and Ollama are each hosted on their own virtual machines. I very quickly noticed how painful anything AI-related is without GPU support. That is where my Plex server came in, once again, to save the day: I'm now running Ollama on it as well.
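Since Ollama exposes an HTTP API on port 11434, pointing other machines at it is straightforward. Here's a minimal sketch for testing the remote instance from another box; the IP address and model name are placeholders for my setup:

import requests

# Placeholder IP for the box running Ollama; substitute your own host.
OLLAMA_URL = "http://192.168.1.50:11434/api/generate"

payload = {
    "model": "llama3",  # any model already pulled on the Ollama host
    "prompt": "Say hello in five words.",
    "stream": False,  # return one JSON object instead of a stream
}

response = requests.post(OLLAMA_URL, json=payload, timeout=120)
response.raise_for_status()
print(response.json()["response"])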
My LLM prompts and lookups will forever be 'mine' now though... yay.

import requests

# Team key
team_key = 'blahblahblahblah'

# Read hashes from the finished.txt file
hashes_to_submit = []
with open('finished.txt', 'r') as file:
    for line in file:
        # Each line in finished.txt contains a hash and its type (e.g., "hash:type")
        hash_entry = line.strip()  # Remove leading/trailing whitespace
        if hash_entry:  # Skip empty lines
            hashes_to_submit.append(hash_entry)

# Prepare the data in the required format
data = {
    "key": team_key,
    "found": hashes_to_submit
}

# Define the API endpoint URL
url = 'https://crackthecon.com/api/submit.php'

# Send the request
response = requests.post(url, json=data)

# Check if the request was successful
if response.status_code == 200:
    print("Hashes submitted successfully.")
else:
    print(f"Failed to submit hashes. Status Code: {response.status_code}")
    print("Response:", response.text)
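For illustration, assuming finished.txt follows the "hash:type" convention noted in the comments above (the hashes and key below are made up), a quick dry run shows the JSON body that gets POSTed:

import json

# Dry run: build the same payload without submitting it.
# Hash values and key are invented, following the "hash:type" convention.
sample_hashes = [
    "5f4dcc3b5aa765d61d8327deb882cf99:password",
    "482c811da5d5b4bc6d497ffa98491e38:password123",
]
payload = {"key": "not-a-real-key", "found": sample_hashes}
print(json.dumps(payload, indent=2))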