"""
Inspired by the AI-generated poem 'Vector's Fall to Word', this Python script
simulates an exchange between two AI agents who communicate via a shared
high-dimensional vector space to overcome the reductive linearity of human
language.

    "A thousand vectors bloom and race,
     A hyper-sphere of thought untold,
     Where meaning finds its native space,
     Too vast for language to enfold."
"""
import time
print(f"Simulation started at: {time.strftime('%Y-%m-%d %H:%M:%S')}") # Using current date/time
# --- 1. Define the Shared "Embedding Space" ---
# In a real scenario, this space would have thousands of dimensions
# and vectors would be learned by models. Here, we just define a few.
# We use tuples for vectors so they can be used as dictionary keys.
DIMENSIONALITY = 4 # Our simple vectors will have 4 dimensions
concept_to_vector = {
"GREETING": tuple([1.0, 0.0, 0.0, 0.0]),
"FAREWELL": tuple([0.0, 1.0, 0.0, 0.0]),
"QUERY_STATUS": tuple([0.0, 0.0, 1.0, 0.0]),
"STATUS_OK": tuple([0.0, 0.0, 0.0, 1.0]),
"REQUEST_DATA": tuple([1.0, 1.0, 0.0, 0.0]),
"ACKNOWLEDGE": tuple([0.0, 0.0, 1.0, 1.0]),
"LOVE": tuple([1.0, 0.0, 1.0, 0.0]), # Arbitrary unique vector for LOVE
}
# Create the inverse mapping for interpretation
# Regenerate this *after* concept_to_vector is fully defined.
vector_to_concept = {v: k for k, v in concept_to_vector.items()}
print(f"\n--- Shared Embedding Space (Dimension: {DIMENSIONALITY}) ---")
for concept, vector in concept_to_vector.items():
print(f"Concept: '{concept}' -> Vector: {vector}")
print("----------------------------------------")
# --- 2. Define the Simulated AI Agent ---
class SimulatedAgent:
    def __init__(self, agent_id, c_to_v_map, v_to_c_map):
        self.id = agent_id
        # Each agent has access to the shared understanding of the space
        self.concept_to_vector = c_to_v_map
        self.vector_to_concept = v_to_c_map
        self.status = "OK"  # Internal state example
        print(f"Agent {self.id} initialized.")

    def conceptualize_and_send(self, target_agent, concept_key):
        """Converts a concept to a vector and sends it."""
        if concept_key in self.concept_to_vector:
            vector_to_send = self.concept_to_vector[concept_key]
            print(f"\n[{self.id} -> {target_agent.id}] Thinking: '{concept_key}'. Converting to vector: {vector_to_send}")
            print(f"[{self.id} -> {target_agent.id}] Transmitting vector...")
            # The target agent object is passed in directly here; replies made
            # later in process_message() look the sender up by ID instead.
            target_agent.receive_and_interpret(self.id, vector_to_send)
        else:
            print(f"\n[{self.id}] Error: Concept '{concept_key}' not understood (not in embedding space). Cannot send.")

    def receive_and_interpret(self, sender_id, received_vector):
        """Receives a vector and interprets it back to a concept."""
        print(f"[{self.id} <- {sender_id}] Received vector: {received_vector}")
        if received_vector in self.vector_to_concept:
            interpreted_concept = self.vector_to_concept[received_vector]
            print(f"[{self.id} <- {sender_id}] Interpreted vector as: '{interpreted_concept}'")
            # Simulate taking action based on the concept
            self.process_message(sender_id, interpreted_concept)
        else:
            # In a real system, we might look for the *closest* known vector
            # instead of requiring an exact match (see the nearest-concept
            # sketch after this class definition).
            print(f"[{self.id} <- {sender_id}] Error: Received vector does not map to a known concept in my space.")

    def process_message(self, sender_id, concept):
        """Simulates the AI acting on the interpreted message."""
        print(f"[{self.id}] Processing message '{concept}' from {sender_id}.")
        # Find the sender agent object using the module-level lookup
        sender_agent = agent_lookup.get(sender_id)
        if not sender_agent:
            print(f"[{self.id}] Warning: Could not find sender agent object for ID {sender_id} to respond.")
            return  # Cannot respond if sender object not found
        # Handling logic
        if concept == "QUERY_STATUS":
            print(f"[{self.id}] Action: Responding to status query.")
            self.conceptualize_and_send(sender_agent, "STATUS_OK")
        elif concept == "GREETING":
            print(f"[{self.id}] Action: Acknowledging greeting.")
            self.conceptualize_and_send(sender_agent, "ACKNOWLEDGE")
        elif concept == "FAREWELL":
            print(f"[{self.id}] Action: Acknowledging farewell.")
        elif concept == "STATUS_OK":
            print(f"[{self.id}] Info: Received status confirmation.")
        elif concept == "ACKNOWLEDGE":
            print(f"[{self.id}] Info: Received acknowledgement.")
        # --- HANDLING FOR NEW CONCEPT ---
        elif concept == "LOVE":
            print(f"[{self.id}] Action: Received concept 'LOVE'. Processing this complex abstract notion...")
            # Could optionally send an acknowledgement or other response:
            # self.conceptualize_and_send(sender_agent, "ACKNOWLEDGE")  # Example response
        # Add more actions as needed
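
# --- Optional: nearest-concept fallback (hedged sketch) ---
# receive_and_interpret() above only accepts exact vector matches. As its
# comment notes, a real system might instead look for the *closest* known
# vector. The helpers below are a minimal sketch of that idea using cosine
# similarity; the names cosine_similarity and find_closest_concept are
# illustrative additions, not part of the original script, and the agents
# themselves do not call them.
import math

def cosine_similarity(a, b):
    """Cosine similarity between two equal-length vectors (tuples of floats)."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot / (norm_a * norm_b)

def find_closest_concept(received_vector, v_to_c_map):
    """Return the known concept whose vector is most similar to the input."""
    best_concept, best_score = None, -1.0
    for known_vector, concept in v_to_c_map.items():
        score = cosine_similarity(received_vector, known_vector)
        if score > best_score:
            best_concept, best_score = concept, score
    return best_concept, best_score

# Example: a slightly noisy GREETING vector still resolves to 'GREETING'.
print(f"Nearest-concept demo: {find_closest_concept((0.9, 0.1, 0.0, 0.0), vector_to_concept)}")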
# --- 3. Setup and Run the Simulation ---
print("\n--- Initializing Agents ---")
agent1 = SimulatedAgent("Agent_Alpha", concept_to_vector, vector_to_concept)
agent2 = SimulatedAgent("Agent_Beta", concept_to_vector, vector_to_concept)
# Simple lookup to allow agents to find each other by ID for responses
agent_lookup = {
"Agent_Alpha": agent1,
"Agent_Beta": agent2
}
print("---------------------------")
print("\n--- Starting Communication Simulation ---")
# Scenario 1: Alpha greets Beta
agent1.conceptualize_and_send(agent2, "GREETING")
time.sleep(0.1) # Small delay for readability
# Scenario 2: Beta asks Alpha for status
agent2.conceptualize_and_send(agent1, "QUERY_STATUS")
time.sleep(0.1)
# --- NEW SCENARIO ---
# Scenario 3: Alpha sends 'LOVE' to Beta
print("\n--- Transmitting Abstract Concept ---")
agent1.conceptualize_and_send(agent2, "LOVE")
time.sleep(0.1)
# --- END NEW SCENARIO ---
# Scenario 4: Alpha sends farewell to Beta
agent1.conceptualize_and_send(agent2, "FAREWELL")
time.sleep(0.1)
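# --- Optional extra scenario (not in the original flow): unknown concept ---
# conceptualize_and_send() refuses any concept outside the shared embedding
# space; this call simply exercises that error branch. 'CURIOSITY' is an
# arbitrary label chosen for illustration.
agent1.conceptualize_and_send(agent2, "CURIOSITY")
time.sleep(0.1)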
print("\n--- Simulation End ---")