
Implementing prompt generation and optimization for Ollama in Python

1. Basic environment configuration

import requests
import json
from typing import List, Dict, Optional
from dataclasses import dataclass

@dataclass
class PromptContext:
    task: str
    domain: str
    requirements: List[str]

class OllamaService:
    def __init__(self, base_url: str = "http://localhost:11434"):
        self.base_url = base_url
        # Map of supported model aliases to Ollama model names
        self.models = {
            'mistral': 'mistral',
            'llama2': 'llama2',
            'neural-chat': 'neural-chat'
        }
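
Before generating anything, it helps to confirm that the local Ollama server is actually reachable. The sketch below queries Ollama's /api/tags endpoint, which lists locally installed models; the list_models helper is an illustrative addition, not part of the class above.

def list_models(base_url: str = "http://localhost:11434") -> List[str]:
    # Illustrative helper: /api/tags returns the locally installed models
    response = requests.get(f"{base_url}/api/tags", timeout=5)
    response.raise_for_status()
    return [model["name"] for model in response.json().get("models", [])]

print(list_models())  # e.g. ['mistral:latest', 'llama2:latest']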

2. Core function implementation

2.1 Prompt generation service

class PromptGenerationService:
    def __init__(self, model_name: str = 'mistral'):
        self.model_name = model_name
        self.api_url = "http://localhost:11434/api/generate"

    async def generate_prompt(self, context: PromptContext) -> str:
        prompt = f"""
        Task: Create a detailed prompt for the following context:
        - Task Type: {context.task}
        - Domain: {context.domain}
        - Requirements: {', '.join(context.requirements)}
        
        Generate a structured prompt that includes:
        1. Context setting
        2. Specific requirements
        3. Output format
        4. Constraints
        5. Examples (if applicable)
        """

        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        
        return response.json()["response"]

    async def optimize_prompt(self, original_prompt: str) -> str:
        prompt = f"""
        Analyze and optimize the following prompt:
        "{original_prompt}"
        
        Provide:
        1. Improved version
        2. Explanation of changes
        3. Potential variations
        """

        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        
        return response.json()["response"]

2.2 Prompt template management

class PromptTemplates:
    @staticmethod
    def get_code_review_template(code: str) -> str:
        return f"""
        Analyze the following code:
        {code}
        
        Provide:
        1. Code quality assessment
        2. Potential improvements
        3. Security concerns
        4. Performance optimization
        """

    @staticmethod
    def get_documentation_template(component: str) -> str:
        return f"""
        Generate documentation for:
        {component}
        
        Include:
        1. Overview
        2. API reference
        3. Usage examples
        4. Best practices
        """

    @staticmethod
    def get_refactoring_template(code: str) -> str:
        return f"""
        Suggest refactoring for:
        {code}
        
        Consider:
        1. Design patterns
        2. Clean code principles
        3. Performance impact
        4. Maintainability
        """

3. Usage examples

async def main():
    # Initialize the service
    prompt_service = PromptGenerationService(model_name='mistral')
    
    # Example: generating a code generation prompt
    code_context = PromptContext(
        task='code_generation',
        domain='web_development',
        requirements=[
            'React component',
            'TypeScript',
            'Material UI',
            'Form handling'
        ]
    )
    
    code_prompt = await prompt_service.generate_prompt(code_context)
    print("Code Generation Prompt Word:", code_prompt)
    
    # Example: generating a documentation prompt
    doc_context = PromptContext(
        task='documentation',
        domain='API_reference',
        requirements=[
            'OpenAPI format',
            'Examples included',
            'Error handling',
            'Authentication details'
        ]
    )
    
    doc_prompt = await prompt_service.generate_prompt(doc_context)
    print("Document generation prompt word:", doc_prompt)

    # Prompt optimization example
    original_prompt = "Write a React component"
    optimized_prompt = await prompt_service.optimize_prompt(original_prompt)
    print("Optimized prompt words:", optimized_prompt)

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

4. Utility class implementation

class PromptUtils:
    @staticmethod
    def format_requirements(requirements: List[str]) -> str:
        return "\n".join([f"- {req}" for req in requirements])

    @staticmethod
    def validate_prompt(prompt: str) -> bool:
        # Simple prompt validation: non-empty after stripping whitespace
        return len(prompt.strip()) > 0

    @staticmethod
    def enhance_prompt(prompt: str) -> str:
        # Append common quality requirements to strengthen the prompt
        return f"""
        {prompt}
        
        Additional requirements:
        - Provide clear and detailed explanations
        - Include practical examples
        - Consider edge cases
        - Follow best practices
        """

5. Error handling

class PromptGenerationError(Exception):
    pass

class ModelConnectionError(Exception):
    pass

def handle_api_errors(func):
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except requests.exceptions.ConnectionError:
            raise ModelConnectionError("Unable to connect to Ollama service")
        except Exception as e:
            raise PromptGenerationError(f"Prompt generation error: {str(e)}")
    return wrapper
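
The decorator can then be applied to the async service methods. One way, sketched below, is a thin subclass that wraps the inherited methods (SafePromptService is a hypothetical name introduced here for illustration):

class SafePromptService(PromptGenerationService):
    @handle_api_errors
    async def generate_prompt(self, context: PromptContext) -> str:
        return await super().generate_prompt(context)

    @handle_api_errors
    async def optimize_prompt(self, original_prompt: str) -> str:
        return await super().optimize_prompt(original_prompt)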

6. Configuration Management

class Config:
    MODELS = {
        'mistral': {
            'name': 'mistral',
            'description': 'Fast, lightweight prompt generation',
            'parameters': {
                'temperature': 0.7,
                'max_tokens': 2000
            }
        },
        'llama2': {
            'name': 'llama2',
            'description': 'Complex, detailed prompt requirements',
            'parameters': {
                'temperature': 0.8,
                'max_tokens': 4000
            }
        },
        'neural-chat': {
            'name': 'neural-chat',
            'description': 'Interactive prompt optimization',
            'parameters': {
                'temperature': 0.9,
                'max_tokens': 3000
            }
        }
    }
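
Ollama's /api/generate endpoint accepts sampling parameters through an "options" object; the max_tokens value in this Config corresponds to Ollama's num_predict option. A sketch of wiring the configuration into a request (the name mapping here is an assumption):

params = Config.MODELS['mistral']['parameters']
payload = {
    "model": Config.MODELS['mistral']['name'],
    "prompt": "Explain dependency injection in one paragraph.",
    "stream": False,
    "options": {
        "temperature": params['temperature'],
        "num_predict": params['max_tokens']  # Ollama's name for the token limit
    }
}
response = requests.post("http://localhost:11434/api/generate", json=payload)
print(response.json()["response"])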

Using this Python implementation, you can:

  • Generate structured prompts
  • Optimize existing prompts
  • Use predefined templates
  • Handle prompt requirements for various scenarios

Key advantages:

  • Object-oriented design
  • Asynchronous support
  • Error handling
  • Type hints
  • Configuration Management
  • Modular structure

This implementation can serve as a basic framework, to be extended and customized for specific needs.
