commit a23f189267f39b31ea399f9891daa022c40236b7
Author: lightoshadow
Date:   Thu Dec 11 23:26:21 2025 +0100

    Initial Commit

diff --git a/.env b/.env
new file mode 100644
index 0000000..6eda6d8
--- /dev/null
+++ b/.env
@@ -0,0 +1,5 @@
+FLASK_ENV=development
+UPLOAD_FOLDER=uploads
+SEGMENTED_FOLDER=segmented
+ALLOWED_EXTENSIONS=.mp4,.avi,.mov,.mkv
+SAM_MODEL_SIZE=vit_b
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5fb572f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+# Python-generated files
+__pycache__/
+*.py[oc]
+build/
+dist/
+wheels/
+*.egg-info
+
+segmented/
+uploads/
+
+# Virtual environments
+.venv
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000..e4fba21
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.12
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f943fbf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,210 @@
+# Video Object Segmentation with SAM2
+
+A web application that lets you upload a video, click on an object, and segment it out using Meta's Segment Anything Model (SAM).
+
+## Features
+
+- 📤 Upload video files (MP4, AVI, MOV, MKV)
+- 🖼️ Preview the first frame of the video
+- 🎯 Click on objects to select them for segmentation
+- ✂️ AI-powered object segmentation using SAM
+- 🎥 Download segmented video results
+- 🎨 Clean, responsive user interface
+
+## Requirements
+
+- **Python 3.8-3.12** (tested and compatible)
+- PyTorch 2.2.0+ with CUDA (recommended for GPU acceleration)
+- Flask
+- OpenCV
+- NumPy (< 2.0)
+- Segment Anything (SAM)
+
+**Python Version Compatibility:**
+
+- ✅ Python 3.8, 3.9, 3.10, 3.11, 3.12 all supported
+- 🔄 `requirements.txt` pins `numpy<2.0` to avoid NumPy 2.x ABI issues (see SETUP_GUIDE.md)
+- 💡 Python 3.12 users: use the updated requirements (torch 2.2.0+)
+
+## Why use uv?
+
+We recommend using **uv** for this project because:
+
+✅ **Faster dependency resolution**: uv is significantly faster than pip
+✅ **Better virtual environment management**: Cleaner and more reliable venvs
+✅ **Deterministic builds**: Lockfile-based, reproducible installs
+✅ **Modern Python tooling**: Built with Rust for performance
+✅ **Better compatibility**: Handles complex dependency trees well
+
+If you're working on Python projects, uv is a great modern alternative to pip + virtualenv!
+
+## Installation
+
+### 1. Clone the repository
+
+```bash
+git clone https://github.com/yourusername/video-segmentation-sam2.git
+cd video-segmentation-sam2
+```
+
+### 2. Install dependencies (using uv - recommended)
+
+First, install uv (a fast Python package installer and virtual environment manager):
+
+```bash
+# Install uv
+pip install uv
+
+# Create virtual environment and install dependencies
+uv venv .venv
+source .venv/bin/activate  # On Windows: .\.venv\Scripts\activate
+uv pip install -r requirements.txt
+```
+
+### 3. Install SAM manually
+
+SAM needs to be installed manually from GitHub:
+
+```bash
+# Clone the segment-anything repository
+git clone https://github.com/facebookresearch/segment-anything.git
+cd segment-anything
+
+# Install SAM (a regular install, so the clone can be removed afterwards)
+pip install .
+
+# Download the model checkpoint (ViT-B recommended)
+wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+mv sam_vit_b_01ec64.pth ..
+cd ..
+
+# Clean up
+rm -rf segment-anything
+```
+
+After steps 1-3 you will have:
+
+- A virtual environment with all Python dependencies installed
+- SAM installed from source
+- The ViT-B model checkpoint in the project root
+
+The `uploads/` and `segmented/` directories are created automatically when the app starts.
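+
+You can verify the install with the short snippet below — a minimal sketch, assuming the checkpoint sits in the project root:
+
+```python
+# Sanity check: import SAM and load the ViT-B checkpoint.
+import os
+import torch
+from segment_anything import SamPredictor, sam_model_registry
+
+ckpt = "sam_vit_b_01ec64.pth"
+assert os.path.exists(ckpt), "Checkpoint missing - rerun step 3"
+
+sam = sam_model_registry["vit_b"](checkpoint=ckpt)
+sam.to("cuda" if torch.cuda.is_available() else "cpu")
+predictor = SamPredictor(sam)
+print("SAM ready")
+```
+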
+### Alternative: Standard pip installation
+
+If you prefer not to use uv:
+
+```bash
+pip install -r requirements.txt
+```
+
+Then follow step 3 above to install SAM and download the model checkpoint.
+
+### 4. Download SAM model weights
+
+The application uses **ViT-B** (the smallest, fastest model) by default. You need the file `sam_vit_b_01ec64.pth` in the root directory; step 3 already downloads it.
+
+Download from: [https://github.com/facebookresearch/segment-anything](https://github.com/facebookresearch/segment-anything)
+
+Or use the following command:
+
+```bash
+wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+```
+
+**Model Options:**
+
+- `vit_b` (default): Fastest, good for testing - `sam_vit_b_01ec64.pth`
+- `vit_l`: Medium size/performance - `sam_vit_l_0b3195.pth`
+- `vit_h`: Best accuracy, largest - `sam_vit_h_4b8939.pth`
+
+You can change the model by setting `SAM_MODEL_SIZE` in `.env`.
+
+### 5. Run the application
+
+```bash
+python app.py
+```
+
+The application will start on `http://localhost:5000`.
+
+## Usage
+
+1. **Upload a video**: Click the "Select Video File" button and choose a video file
+2. **Select object**: Click on the object you want to segment in the preview image
+3. **Add more points**: Click additional points to help the AI better understand the object
+4. **Segment**: Click "Segment Object" to start the segmentation process
+5. **Download**: Once processing is complete, preview and download your segmented video
+
+## Configuration
+
+You can configure the application by editing the `.env` file:
+
+```env
+FLASK_ENV=development
+UPLOAD_FOLDER=uploads
+SEGMENTED_FOLDER=segmented
+ALLOWED_EXTENSIONS=.mp4,.avi,.mov,.mkv
+SAM_MODEL_SIZE=vit_b
+```
+
+## Technical Details
+
+### Backend
+
+- **Flask**: Web framework
+- **SAM**: Segment Anything Model for object segmentation
+- **OpenCV**: Video processing and frame manipulation
+- **PyTorch**: Deep learning framework for running SAM
+
+### Frontend
+
+- **HTML5/CSS3**: Responsive user interface
+- **JavaScript**: Interactive point selection and AJAX requests
+- **Base64 encoding**: For preview image transfer
+
+### Processing Pipeline
+
+1. Video upload and first-frame extraction
+2. User selects points on the object to segment
+3. SAM predicts a mask for every frame, reusing the points chosen on the first frame
+4. Masks are applied to each frame as an overlay
+5. Processed frames are combined into a new video
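+
+In code, the heart of this pipeline is a per-frame loop. The sketch below is a condensed version of `process_video_segmentation()` in `app.py` (error handling and logging omitted):
+
+```python
+import cv2
+import numpy as np
+
+def segment_video(predictor, video_path, out_path, points):
+    cap = cv2.VideoCapture(video_path)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)
+
+    pts = np.array(points)
+    labels = np.array([1] * len(points))  # 1 = foreground point
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        # SAM expects RGB; OpenCV reads BGR
+        predictor.set_image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+        masks, _, _ = predictor.predict(
+            point_coords=pts, point_labels=labels, multimask_output=False)
+        mask = masks[0].astype(np.uint8) * 255
+        overlay = np.zeros_like(frame)
+        overlay[:, :, 2] = mask  # red channel
+        out.write(cv2.addWeighted(frame, 0.5, overlay, 0.5, 0))
+
+    cap.release()
+    out.release()
+```
+
+Note that the same click coordinates are applied to every frame, so results degrade if the object moves far from its initial position.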
+
+## Performance Considerations
+
+- **GPU recommended**: SAM runs much faster on a CUDA-enabled GPU
+- **Video length**: Longer videos take more time to process
+- **Resolution**: Higher-resolution videos require more processing power
+- **Point selection**: More points can help with complex objects but may slow down processing
+
+## Troubleshooting
+
+### Common Issues
+
+**Issue: SAM model not found**
+- Solution: Download the model checkpoint and place it in the root directory
+
+**Issue: CUDA out of memory**
+- Solution: Reduce the video resolution or switch to a smaller model (`vit_b`)
+
+**Issue: Slow processing on CPU**
+- Solution: Use a machine with a GPU or reduce the video resolution
+
+**Issue: Video format not supported**
+- Solution: Convert your video to MP4 format
+
+## License
+
+This project is licensed under the MIT License. The SAM model and weights are provided by Meta Research under their own license.
+
+## Acknowledgements
+
+- Meta Research for the Segment Anything Model
+- Flask team for the web framework
+- OpenCV team for computer vision tools
+
+## Future Improvements
+
+- [ ] Add support for multiple object segmentation
+- [ ] Implement background removal options
+- [ ] Add video trimming functionality
+- [ ] Support for real-time preview
+- [ ] Batch processing of multiple videos
+- [ ] Advanced segmentation parameters (threshold, etc.)
+- [ ] Cloud deployment options
\ No newline at end of file
diff --git a/SETUP_GUIDE.md b/SETUP_GUIDE.md
new file mode 100644
index 0000000..70ec9de
--- /dev/null
+++ b/SETUP_GUIDE.md
@@ -0,0 +1,140 @@
+# Video Object Segmentation with SAM2 - Setup Guide
+
+## Quick Start (macOS/Linux)
+
+### 1. Install uv
+
+```bash
+pip install uv
+```
+
+### 2. Create virtual environment and install dependencies
+
+```bash
+uv venv .venv
+source .venv/bin/activate
+uv pip install -r requirements.txt
+```
+
+### 3. Install SAM manually
+
+```bash
+# Clone the segment-anything repository
+git clone https://github.com/facebookresearch/segment-anything.git
+cd segment-anything
+
+# Install SAM (a regular install, so the clone can be removed afterwards)
+pip install .
+
+# Download the ViT-B model checkpoint (recommended)
+wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+mv sam_vit_b_01ec64.pth ..
+cd ..
+
+# Clean up
+rm -rf segment-anything
+```
+
+### 4. Create directories
+
+```bash
+mkdir -p uploads segmented
+```
+
+### 5. Run the application
+
+```bash
+python app.py
+```
+
+## Project Structure
+
+```
+video-segmentation-sam2/
+├── app.py                  # Main Flask application
+├── requirements.txt        # Python dependencies
+├── .env                    # Configuration
+├── templates/
+│   └── index.html          # Web interface
+├── uploads/                # Uploaded videos (created automatically)
+├── segmented/              # Processed videos (created automatically)
+└── sam_vit_b_01ec64.pth    # SAM model checkpoint
+```
+
+## Configuration
+
+Edit the `.env` file to customize:
+
+```env
+FLASK_ENV=development
+UPLOAD_FOLDER=uploads
+SEGMENTED_FOLDER=segmented
+ALLOWED_EXTENSIONS=.mp4,.avi,.mov,.mkv
+SAM_MODEL_SIZE=vit_b  # Options: vit_b, vit_l, vit_h
+```
+
+## Troubleshooting
+
+### NumPy compatibility issues
+
+If you see errors about NumPy 2.x compatibility:
+
+```
+A module that was compiled using NumPy 1.x cannot be run in NumPy 2.3.5
+```
+
+**Solution:** `requirements.txt` already pins `numpy<2.0` to avoid this issue. Make sure you:
+
+1. Delete your virtual environment: `rm -rf .venv`
+2. Recreate it: `uv venv .venv`
+3. Reinstall dependencies: `uv pip install -r requirements.txt`
+
+### SAM not found
+
+If you get `ImportError: SAM is not installed`, make sure you:
+
+1. Cloned the segment-anything repository
+2. Ran `pip install .` from the segment-anything directory
+3. Have the checkpoint file in the root directory
+
+### CUDA not available
+
+If you don't have a GPU, the app falls back to CPU (slower). For better performance:
+
+1. Install the CUDA toolkit
+2. Install cuDNN
+3. Make sure `torch.cuda.is_available()` returns `True`
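+
+A quick way to check, sketched below:
+
+```python
+import torch
+
+# Prints the installed torch version and whether a CUDA device is visible.
+print(torch.__version__, torch.cuda.is_available())
+```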
+
+### Port already in use
+
+If port 5000 is busy:
+
+1. Change the port in `app.py` (last line)
+2. Or kill the process using port 5000:
+
+   ```bash
+   lsof -i :5000
+   kill -9 <PID>
+   ```
+
+## Model Options
+
+| Model | Checkpoint File | Size | Speed | Accuracy | Best For |
+|-------|----------------|------|-------|----------|----------|
+| **ViT-B** | `sam_vit_b_01ec64.pth` | Smallest | Fastest | Good | Testing, quick results, lower-end hardware |
+| **ViT-L** | `sam_vit_l_0b3195.pth` | Medium | Medium | Better | Balanced performance/quality |
+| **ViT-H** | `sam_vit_h_4b8939.pth` | Largest | Slowest | Best | High-quality results, powerful hardware |
+
+To change models:
+
+1. Download the desired checkpoint
+2. Update `SAM_MODEL_SIZE` in `.env`
+3. Restart the application
+
+## Using the Application
+
+1. **Upload**: Select a video file (MP4, AVI, MOV, MKV)
+2. **Preview**: See the first frame of your video
+3. **Select**: Click on the object you want to segment
+4. **Process**: Click "Segment Object" to start processing
+5. **Download**: Get your segmented video
+
+## Performance Tips
+
+- **GPU Acceleration**: SAM runs much faster with CUDA
+- **Video Length**: Shorter videos process faster
+- **Resolution**: Lower resolutions are quicker to process
+- **Points**: 3-5 well-placed points usually work best
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..abc15a4
--- /dev/null
+++ b/app.py
@@ -0,0 +1,364 @@
+import os
+import base64
+
+import cv2
+import numpy as np
+import torch
+from flask import Flask, request, jsonify, send_from_directory, render_template
+from flask_cors import CORS
+from werkzeug.utils import secure_filename
+from dotenv import load_dotenv
+
+# NOTE: segment_anything is imported lazily inside initialize_sam2() so that a
+# missing install produces a helpful message instead of a bare ImportError.
+
+# Load environment variables
+load_dotenv()
+
+app = Flask(__name__)
+CORS(app)
+
+# Configuration
+app.config['UPLOAD_FOLDER'] = os.getenv('UPLOAD_FOLDER', 'uploads')
+app.config['SEGMENTED_FOLDER'] = os.getenv('SEGMENTED_FOLDER', 'segmented')
+app.config['ALLOWED_EXTENSIONS'] = set(os.getenv('ALLOWED_EXTENSIONS', '.mp4,.avi,.mov,.mkv').split(','))
+
+# Ensure directories exist
+os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
+os.makedirs(app.config['SEGMENTED_FOLDER'], exist_ok=True)
+
+
+def initialize_sam2(model_size="vit_b"):
+    """Initialize the SAM model"""
+    print(f"Initializing SAM model ({model_size})...")
+
+    try:
+        from segment_anything import SamPredictor, sam_model_registry
+    except ImportError:
+        raise ImportError(
+            "SAM is not installed. Please install it manually from GitHub:\n"
+            "git clone https://github.com/facebookresearch/segment-anything.git\n"
+            "cd segment-anything\n"
+            "pip install .\n"
+            "Then download the model checkpoint and place it in the root directory."
+        )
+
+    # Map model sizes to checkpoint files
+    model_configs = {
+        "vit_h": {"checkpoint": "sam_vit_h_4b8939.pth", "model_type": "vit_h"},
+        "vit_l": {"checkpoint": "sam_vit_l_0b3195.pth", "model_type": "vit_l"},
+        "vit_b": {"checkpoint": "sam_vit_b_01ec64.pth", "model_type": "vit_b"},
+    }
+
+    if model_size not in model_configs:
+        raise ValueError(f"Unknown model size: {model_size}. Choose from: vit_h, vit_l, vit_b")
+
+    config = model_configs[model_size]
+    sam_checkpoint = config["checkpoint"]
+    model_type = config["model_type"]
+
+    # Check if checkpoint file exists
+    if not os.path.exists(sam_checkpoint):
+        raise FileNotFoundError(
+            f"SAM checkpoint file '{sam_checkpoint}' not found. "
+            f"Please download it from https://github.com/facebookresearch/segment-anything "
+            f"and place it in the root directory."
+        )
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
+    sam.to(device=device)
+    predictor = SamPredictor(sam)
+
+    print(f"SAM model ({model_type}) initialized on {device}")
+    return predictor
+
+
+# Global predictor instance (loaded lazily on the first /segment request)
+sam_predictor = None
+
+# Model size, read from .env (defaults to ViT-B)
+SAM_MODEL_SIZE = os.getenv('SAM_MODEL_SIZE', 'vit_b')
+
+
+def allowed_file(filename):
+    """Check if file has an allowed extension"""
+    if '.' not in filename:
+        return False
+
+    # Get the file extension with dot (e.g., '.mp4')
+    file_extension = '.' + filename.rsplit('.', 1)[1].lower()
+
+    # Debug
+    print(f"🔍 Checking extension: {file_extension}")
+    print(f"📋 Allowed extensions: {app.config['ALLOWED_EXTENSIONS']}")
+
+    return file_extension in app.config['ALLOWED_EXTENSIONS']
+
+
+@app.route('/')
+def index():
+    """Main page"""
+    return render_template('index.html')
+
+
+@app.route('/test')
+def test():
+    """Test route"""
+    return jsonify({'status': 'ok', 'message': 'Flask app is running'})
+
+
+@app.route('/upload', methods=['POST'])
+def upload_video():
+    """Handle video upload"""
+    print("📤 Upload request received")
+
+    if 'file' not in request.files:
+        print("❌ No file part in request")
+        return jsonify({'error': 'No file part'}), 400
+
+    file = request.files['file']
+    if file.filename == '':
+        print("❌ No selected file")
+        return jsonify({'error': 'No selected file'}), 400
+
+    print(f"📁 File received: {file.filename}")
+    # Report the size without reading the whole file into memory
+    file.seek(0, os.SEEK_END)
+    print(f"📊 File size: {file.tell()} bytes")
+    file.seek(0)  # Reset file pointer before saving
+
+    # Debug file extension
+    filename = secure_filename(file.filename)
+    file_extension = filename.rsplit('.', 1)[1].lower() if '.' in filename else ''
+    print(f"🔍 File extension: .{file_extension}")
+    print(f"📋 Allowed extensions: {app.config['ALLOWED_EXTENSIONS']}")
+
+    if file and allowed_file(file.filename):
+        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
+        file.save(filepath)
+
+        print(f"✅ File saved: {filepath}")
+
+        # Extract first frame for preview
+        preview_frame = extract_first_frame(filepath)
+
+        if preview_frame is None:
+            print("⚠️ Could not extract preview frame, using placeholder")
+            return jsonify({
+                'message': 'File uploaded successfully (no preview available)',
+                'filename': filename,
+                'preview': None
+            })
+
+        print("🖼️ Preview frame extracted successfully")
+        return jsonify({
+            'message': 'File uploaded successfully',
+            'filename': filename,
+            'preview': preview_frame
+        })
+    else:
+        print(f"❌ File type not allowed: {file_extension}")
+        return jsonify({'error': f'File type .{file_extension} not allowed. '
+                                 f'Allowed types: {app.config["ALLOWED_EXTENSIONS"]}'}), 400
+
+
+def extract_first_frame(video_path):
+    """Extract first frame from video"""
+    try:
+        # Check if file exists
+        if not os.path.exists(video_path):
+            print(f"❌ Video file not found: {video_path}")
+            return None
+
+        cap = cv2.VideoCapture(video_path)
+
+        # Check if video opened successfully
+        if not cap.isOpened():
+            print(f"❌ Could not open video file: {video_path}")
+            return None
+
+        ret, frame = cap.read()
+        cap.release()
+
+        if ret and frame is not None:
+            # Convert to base64 for easy transfer
+            success, buffer = cv2.imencode('.jpg', frame)
+            if success:
+                frame_base64 = base64.b64encode(buffer).decode('utf-8')
+                print(f"✅ Successfully extracted first frame from {video_path}")
+                return frame_base64
+            else:
+                print("❌ Failed to encode frame as JPEG")
+                return None
+        else:
+            print(f"❌ Could not read first frame from {video_path}")
+            return None
+    except Exception as e:
+        print(f"❌ Error extracting first frame: {e}")
+        return None
+
+
+@app.route('/segment', methods=['POST'])
+def segment_object():
+    """Handle object segmentation"""
+    print("🎯 Segment request received")
+
+    global sam_predictor
+
+    if sam_predictor is None:
+        print("🔧 Initializing SAM model...")
+        sam_predictor = initialize_sam2(SAM_MODEL_SIZE)
+
+    data = request.json
+    print(f"📥 Received data: {data}")
+
+    if not data or 'filename' not in data or 'points' not in data:
+        print("❌ Missing required parameters")
+        return jsonify({'error': 'Missing required parameters'}), 400
+
+    filename = data['filename']
+    points = data['points']  # Expecting [[x1, y1], [x2, y2], ...]
+
+    video_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
+
+    if not os.path.exists(video_path):
+        return jsonify({'error': 'Video file not found'}), 404
+
+    try:
+        # Process the video
+        output_path = process_video_segmentation(video_path, points)
+
+        output_filename = os.path.basename(output_path)
+        print(f"✅ Segmentation completed: {output_filename}")
+        print(f"📁 Output file path: {output_path}")
+        print(f"🔍 File exists: {os.path.exists(output_path)}")
+        print(f"📊 File size: {os.path.getsize(output_path)} bytes")
+
+        return jsonify({
+            'message': 'Segmentation completed',
+            'output_filename': output_filename,
+            'debug_file_path': output_path,
+            'debug_file_exists': os.path.exists(output_path)
+        })
+
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
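+
+
+# Example client-side use of the two endpoints above. This is a sketch: it
+# assumes the app is running locally, that sample.mp4 exists, and it uses the
+# `requests` package, which is not in requirements.txt. The point coordinates
+# are pixel positions in the first frame; the values below are made up.
+#
+#   import requests
+#   with open("sample.mp4", "rb") as f:
+#       up = requests.post("http://localhost:5000/upload", files={"file": f})
+#   name = up.json()["filename"]
+#   seg = requests.post("http://localhost:5000/segment",
+#                       json={"filename": name, "points": [[320, 180], [400, 210]]})
+#   print(seg.json())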
+
+
+def process_video_segmentation(video_path, points):
+    """Process video segmentation using SAM"""
+    global sam_predictor
+
+    # Create output filename
+    base_name = os.path.splitext(os.path.basename(video_path))[0]
+    output_filename = f"{base_name}_segmented.mp4"
+    output_path = os.path.join(app.config['SEGMENTED_FOLDER'], output_filename)
+
+    # Open video
+    cap = cv2.VideoCapture(video_path)
+
+    # Get video properties
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+    # Create video writer
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+    frame_count = 0
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+
+        if not ret:
+            break
+
+        frame_count += 1
+        print(f"Processing frame {frame_count}/{total_frames}")
+
+        # Convert frame to RGB (SAM expects RGB)
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+        # Set image for SAM
+        sam_predictor.set_image(frame_rgb)
+
+        # Convert points to numpy arrays. Note: the same first-frame points are
+        # reused for every frame, so results degrade if the object moves.
+        input_points = np.array(points)
+        input_labels = np.array([1] * len(points))  # 1 means foreground point
+
+        # Get masks
+        masks, scores, logits = sam_predictor.predict(
+            point_coords=input_points,
+            point_labels=input_labels,
+            multimask_output=False
+        )
+
+        # Create mask from the best prediction
+        mask = masks[0].astype(np.uint8) * 255
+
+        # Apply mask to frame (simple approach - you can customize this)
+        masked_frame = apply_mask_to_frame(frame, mask)
+
+        # Write frame
+        out.write(masked_frame)
+
+    cap.release()
+    out.release()
+
+    return output_path
+
+
+def apply_mask_to_frame(frame, mask):
+    """Apply mask to frame - simple implementation"""
+    # Create a colored version of the mask (red overlay)
+    colored_mask = np.zeros_like(frame)
+    colored_mask[:, :, 2] = mask  # Red channel
+
+    # Blend the mask with the original frame
+    alpha = 0.5
+    result = cv2.addWeighted(frame, 1 - alpha, colored_mask, alpha, 0)
+
+    return result
+
+
+@app.route('/download/<filename>')
+def download_file(filename):
+    """Download segmented video"""
+    return send_from_directory(
+        app.config['SEGMENTED_FOLDER'],
+        filename,
+        as_attachment=True
+    )
+
+
+@app.route('/preview/<filename>')
+def preview_video(filename):
+    """Preview original video"""
+    return send_from_directory(
+        app.config['UPLOAD_FOLDER'],
+        filename
+    )
+
+
+@app.route('/segmented/<filename>')
+def serve_segmented_video(filename):
+    """Serve segmented video with proper range request support"""
+    file_path = os.path.join(app.config['SEGMENTED_FOLDER'], filename)
+    print(f"🎬 Video request for: {filename}")
+    print(f"📁 Looking for file at: {file_path}")
+    print(f"🔍 File exists: {os.path.exists(file_path)}")
+
+    if not os.path.exists(file_path):
+        print(f"❌ File not found: {file_path}")
+        return jsonify({'error': f'File {filename} not found'}), 404
+
+    print(f"✅ Serving file: {file_path}")
+
+    # Use send_from_directory with the video MIME type; conditional=True
+    # enables HTTP range requests so the browser can seek.
+    return send_from_directory(
+        app.config['SEGMENTED_FOLDER'],
+        filename,
+        conditional=True,
+        mimetype='video/mp4'
+    )
+
+
+if __name__ == '__main__':
+    app.run(debug=True, port=5000)
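+
+# NOTE: debug=True and Flask's built-in server are for development only. A
+# production sketch (gunicorn is an assumption - it is not in requirements.txt;
+# one worker keeps a single copy of the model in memory):
+#
+#   gunicorn -w 1 -b 0.0.0.0:5000 app:app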
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..bb21d6e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[project]
+name = "censorall"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = []
diff --git a/requirements-uv.txt b/requirements-uv.txt
new file mode 100644
index 0000000..6693770
--- /dev/null
+++ b/requirements-uv.txt
@@ -0,0 +1,14 @@
+# UV-compatible requirements
+# Generated from requirements.txt
+
+flask==2.3.2
+flask-cors==3.0.10
+opencv-python==4.7.0.72
+numpy==1.24.3
+pillow==9.5.0
+segment-anything-2==0.1.0
+torch==2.0.1
+torchvision==0.15.2
+moviepy==1.0.3
+python-dotenv==1.0.0
+werkzeug==2.3.7
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..27ac635
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+flask==2.3.2
+flask-cors==3.0.10
+opencv-python==4.7.0.72
+numpy<2.0
+pillow==9.5.0
+torch==2.2.0
+torchvision==0.17.0
+python-dotenv==1.0.0
\ No newline at end of file
diff --git a/sam_vit_b_01ec64.pth b/sam_vit_b_01ec64.pth
new file mode 100644
index 0000000..538f0f2
Binary files /dev/null and b/sam_vit_b_01ec64.pth differ
diff --git a/templates/index.html b/templates/index.html
new file mode 100644
index 0000000..7fbdc60
--- /dev/null
+++ b/templates/index.html
@@ -0,0 +1,485 @@
+<!-- [485-line HTML template; the markup was lost in extraction. Recoverable
+     text content: page title "Video Object Segmentation with SAM2"; a header
+     "🎥 Video Object Segmentation with SAM2" with the tagline "Upload a video,
+     click on objects, and let AI segment them out!"; an upload section
+     "📤 Upload Your Video" noting "Supported formats: MP4, AVI, MOV, MKV". -->
\ No newline at end of file
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..43eee49
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,8 @@
+version = 1
+revision = 3
+requires-python = ">=3.12"
+
+[[package]]
+name = "censorall"
+version = "0.1.0"
+source = { virtual = "." }